From 7ef3a9c956a6921e76b361418e1d13ad2c30eb37 Mon Sep 17 00:00:00 2001 From: mbaldessari Date: Fri, 8 Nov 2024 10:11:59 +0000 Subject: [PATCH] deploy: c8bf40b0d0cb00e4f19bbceb04a9f2542b4dbfae --- blog/2021-12-31-medical-diagnosis/index.html | 4 ++-- blog/2022-03-23-acm-mustonlyhave/index.html | 4 ++-- blog/2022-03-30-multicloud-gitops/index.html | 4 ++-- .../2022-06-30-ansible-edge-gitops/index.html | 4 ++-- blog/2022-07-15-push-vs-pull/index.html | 4 ++-- blog/2022-08-24-clustergroups/index.html | 4 ++-- blog/2022-09-02-route/index.html | 4 ++-- blog/2022-10-12-acm-provisioning/index.html | 4 ++-- blog/2022-11-20-argo-rollouts/index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- blog/2023-12-01-new-pattern-tiers/index.html | 4 ++-- blog/2023-12-05-nutanix-testing/index.html | 4 ++-- .../index.html | 4 ++-- blog/2023-12-20-private-repos/index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../2024-02-07-hcp-htpasswd-config/index.html | 4 ++-- .../index.html | 4 ++-- blog/2024-07-12-in-cluster-git/index.html | 4 ++-- .../index.html | 4 ++-- blog/2024-08-30-push-secrets/index.html | 4 ++-- blog/2024-09-13-using-hypershift/index.html | 4 ++-- blog/2024-09-26-slimming-of-common/index.html | 4 ++-- blog/2024-10-12-disconnected/index.html | 4 ++-- .../index.html | 22 ++----------------- blog/index.html | 4 ++-- blog/page/2/index.html | 4 ++-- blog/page/3/index.html | 4 ++-- blog/page/4/index.html | 4 ++-- blog/page/5/index.html | 4 ++-- blog_tags/acm/index.html | 4 ++-- blog_tags/acs/index.html | 4 ++-- blog_tags/announce/index.html | 4 ++-- blog_tags/ansible-edge-gitops/index.html | 4 ++-- blog_tags/devops/index.html | 4 ++-- blog_tags/devsecops/index.html | 4 ++-- blog_tags/git/index.html | 4 ++-- blog_tags/gitops/index.html | 4 ++-- blog_tags/how-to/index.html | 4 ++-- blog_tags/index.html | 4 ++-- blog_tags/medical-diagnosis/index.html | 4 ++-- blog_tags/multi-cloud-gitops/index.html | 4 ++-- blog_tags/nutanix/index.html | 4 ++-- 
blog_tags/openshift-platform-plus/index.html | 4 ++-- blog_tags/openshift/index.html | 4 ++-- blog_tags/page/2/index.html | 4 ++-- blog_tags/page/3/index.html | 4 ++-- blog_tags/page/4/index.html | 4 ++-- blog_tags/page/5/index.html | 4 ++-- blog_tags/patterns/index.html | 4 ++-- blog_tags/patterns/page/2/index.html | 4 ++-- blog_tags/patterns/page/3/index.html | 4 ++-- blog_tags/patterns/page/4/index.html | 4 ++-- blog_tags/pipelines/index.html | 4 ++-- blog_tags/provisioning/index.html | 4 ++-- blog_tags/quay/index.html | 4 ++-- blog_tags/route/index.html | 4 ++-- blog_tags/secrets/index.html | 4 ++-- blog_tags/security/index.html | 4 ++-- blog_tags/sequencing/index.html | 4 ++-- blog_tags/subscriptions/index.html | 4 ++-- blog_tags/validated-pattern/index.html | 4 ++-- blog_tags/xray/index.html | 4 ++-- ci/index.html | 4 ++-- ci/internal/index.html | 4 ++-- .../index.html | 4 ++-- contribute/contribute-to-docs/index.html | 4 ++-- contribute/creating-a-pattern/index.html | 4 ++-- contribute/extending-a-pattern/index.html | 4 ++-- contribute/index.html | 4 ++-- contribute/support-policies/index.html | 4 ++-- index.html | 4 ++-- learn/about-pattern-tiers-types/index.html | 4 ++-- learn/about-validated-patterns/index.html | 4 ++-- learn/clustergroup-in-values-files/index.html | 4 ++-- learn/faq/index.html | 4 ++-- learn/implementation/index.html | 4 ++-- learn/importing-a-cluster/index.html | 4 ++-- learn/index.html | 4 ++-- learn/infrastructure/index.html | 4 ++-- learn/keyconcepts/index.html | 4 ++-- learn/maintained/index.html | 4 ++-- learn/ocp-cluster-general-sizing/index.html | 4 ++-- learn/page/2/index.html | 4 ++-- learn/page/3/index.html | 4 ++-- learn/page/4/index.html | 4 ++-- learn/page/5/index.html | 4 ++-- learn/quickstart/index.html | 4 ++-- learn/sandbox/index.html | 4 ++-- .../index.html | 4 ++-- learn/secrets/index.html | 4 ++-- learn/test-artifacts/index.html | 4 ++-- learn/tested/index.html | 4 ++-- .../index.html | 4 ++-- 
.../validated_patterns_frameworks/index.html | 4 ++-- learn/values-files/index.html | 4 ++-- learn/vault/index.html | 4 ++-- learn/vp_agof/index.html | 4 ++-- learn/vp_agof_config_controller/index.html | 4 ++-- learn/vp_openshift_framework/index.html | 4 ++-- learn/vp_structure_vp_pattern/index.html | 4 ++-- learn/workflow/index.html | 4 ++-- .../ansible-automation-platform/index.html | 4 ++-- .../cluster-sizing/index.html | 4 ++-- .../getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- .../ansible-edge-gitops-kasten/index.html | 4 ++-- .../installation-details/index.html | 4 ++-- .../openshift-virtualization/index.html | 4 ++-- .../troubleshooting/index.html | 4 ++-- .../veeam-kasten/index.html | 4 ++-- .../ansible-automation-platform/index.html | 4 ++-- .../cluster-sizing/index.html | 4 ++-- .../getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- patterns/ansible-edge-gitops/index.html | 4 ++-- .../installation-details/index.html | 4 ++-- .../openshift-virtualization/index.html | 4 ++-- .../troubleshooting/index.html | 4 ++-- patterns/ansible-gitops-framework/index.html | 4 ++-- patterns/cockroachdb/index.html | 4 ++-- .../connected-vehicle-architecture/index.html | 4 ++-- patterns/devsecops/cluster-sizing/index.html | 4 ++-- patterns/devsecops/devel-cluster/index.html | 4 ++-- patterns/devsecops/getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- patterns/devsecops/index.html | 4 ++-- .../devsecops/production-cluster/index.html | 4 ++-- .../secure-supply-chain-demo/index.html | 4 ++-- .../edd-getting-started/index.html | 4 ++-- .../emerging-disease-detection/index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- patterns/gaudi-rag-chat-qna/index.html | 4 ++-- patterns/hypershift/index.html | 4 ++-- patterns/index.html | 4 ++-- .../industrial-edge/application/index.html | 4 ++-- .../industrial-edge/cluster-sizing/index.html | 4 ++-- 
.../industrial-edge/demo-script/index.html | 4 ++-- patterns/industrial-edge/factory/index.html | 4 ++-- .../getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- patterns/industrial-edge/index.html | 4 ++-- .../troubleshooting/index.html | 4 ++-- patterns/kong-gateway/index.html | 4 ++-- .../cluster-sizing/index.html | 4 ++-- .../getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- patterns/medical-diagnosis-amx/index.html | 4 ++-- .../troubleshooting/index.html | 4 ++-- .../cluster-sizing/index.html | 4 ++-- .../medical-diagnosis/demo-script/index.html | 4 ++-- .../getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- patterns/medical-diagnosis/index.html | 4 ++-- .../troubleshooting/index.html | 4 ++-- patterns/mlops-fraud-detection/index.html | 4 ++-- .../mfd-getting-started/index.html | 4 ++-- .../mfd-running-the-demo/index.html | 4 ++-- .../multicloud-gitops-amx-rhoai/index.html | 4 ++-- .../mcg-amx-rhoai-bert-script/index.html | 4 ++-- .../mcg-amx-rhoai-cluster-sizing/index.html | 4 ++-- .../mcg-amx-rhoai-demo-script/index.html | 4 ++-- .../mcg-amx-rhoai-getting-started/index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../mcg-amx-rhoai-managed-cluster/index.html | 4 ++-- patterns/multicloud-gitops-amx/index.html | 4 ++-- .../mcg-amx-cluster-sizing/index.html | 4 ++-- .../mcg-amx-getting-started/index.html | 4 ++-- .../index.html | 4 ++-- .../mcg-amx-imperative-actions/index.html | 4 ++-- .../mcg-amx-managed-cluster/index.html | 4 ++-- .../cluster-sizing/index.html | 4 ++-- .../getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- .../multicloud-gitops-portworx/index.html | 4 ++-- .../managed-cluster/index.html | 4 ++-- patterns/multicloud-gitops-qat/index.html | 4 ++-- .../mcg-qat-cluster-sizing/index.html | 4 ++-- .../mcg-qat-getting-started/index.html | 4 ++-- .../index.html | 4 ++-- 
.../mcg-qat-imperative-actions/index.html | 4 ++-- .../mcg-qat-managed-cluster/index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- .../index.html | 4 ++-- patterns/multicloud-gitops-sgx/index.html | 4 ++-- .../mcg-sgx-cluster-sizing/index.html | 4 ++-- .../mcg-sgx-demo-script/index.html | 4 ++-- .../mcg-sgx-getting-started/index.html | 4 ++-- .../index.html | 4 ++-- .../mcg-sgx-imperative-actions/index.html | 4 ++-- .../mcg-sgx-managed-cluster/index.html | 4 ++-- patterns/multicloud-gitops/index.html | 4 ++-- .../mcg-cluster-sizing/index.html | 4 ++-- .../mcg-demo-script/index.html | 4 ++-- .../mcg-getting-started/index.html | 4 ++-- .../mcg-ideas-for-customization/index.html | 4 ++-- .../mcg-imperative-actions/index.html | 4 ++-- .../mcg-managed-cluster/index.html | 4 ++-- .../openshift-ai/getting-started/index.html | 4 ++-- patterns/openshift-ai/index.html | 4 ++-- .../rag-llm-gitops/getting-started/index.html | 4 ++-- .../gpu_provisioning/index.html | 4 ++-- patterns/rag-llm-gitops/index.html | 4 ++-- patterns/retail/application/index.html | 4 ++-- patterns/retail/cluster-sizing/index.html | 4 ++-- patterns/retail/components/index.html | 4 ++-- patterns/retail/getting-started/index.html | 4 ++-- .../retail/ideas-for-customization/index.html | 4 ++-- patterns/retail/index.html | 4 ++-- patterns/retail/store/index.html | 4 ++-- patterns/retail/troubleshooting/index.html | 4 ++-- patterns/travelops/demo-script/index.html | 4 ++-- patterns/travelops/getting-started/index.html | 4 ++-- .../ideas-for-customization/index.html | 4 ++-- patterns/travelops/index.html | 4 ++-- search/index.html | 4 ++-- 225 files changed, 450 insertions(+), 468 deletions(-) diff --git a/blog/2021-12-31-medical-diagnosis/index.html b/blog/2021-12-31-medical-diagnosis/index.html index d941cb5a5..40766a3f8 100644 --- a/blog/2021-12-31-medical-diagnosis/index.html +++ 
b/blog/2021-12-31-medical-diagnosis/index.html @@ -3631,11 +3631,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: 
ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. 
But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. +apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-03-23-acm-mustonlyhave/index.html b/blog/2022-03-23-acm-mustonlyhave/index.html index 65fff5b8b..ec6af5060 100644 --- a/blog/2022-03-23-acm-mustonlyhave/index.html +++ b/blog/2022-03-23-acm-mustonlyhave/index.html @@ -3701,11 +3701,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-03-30-multicloud-gitops/index.html b/blog/2022-03-30-multicloud-gitops/index.html index a71b0d640..76999b566 100644 --- a/blog/2022-03-30-multicloud-gitops/index.html +++ b/blog/2022-03-30-multicloud-gitops/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-06-30-ansible-edge-gitops/index.html b/blog/2022-06-30-ansible-edge-gitops/index.html index 72e315ec4..5cdff3129 100644 --- a/blog/2022-06-30-ansible-edge-gitops/index.html +++ b/blog/2022-06-30-ansible-edge-gitops/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-07-15-push-vs-pull/index.html b/blog/2022-07-15-push-vs-pull/index.html index 993c4c616..b74296eb3 100644 --- a/blog/2022-07-15-push-vs-pull/index.html +++ b/blog/2022-07-15-push-vs-pull/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-08-24-clustergroups/index.html b/blog/2022-08-24-clustergroups/index.html index a0dda3cf9..88d9f3e39 100644 --- a/blog/2022-08-24-clustergroups/index.html +++ b/blog/2022-08-24-clustergroups/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-09-02-route/index.html b/blog/2022-09-02-route/index.html index 46913e047..b73defa37 100644 --- a/blog/2022-09-02-route/index.html +++ b/blog/2022-09-02-route/index.html @@ -3701,11 +3701,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-10-12-acm-provisioning/index.html b/blog/2022-10-12-acm-provisioning/index.html index 62f0207cc..1c68b4712 100644 --- a/blog/2022-10-12-acm-provisioning/index.html +++ b/blog/2022-10-12-acm-provisioning/index.html @@ -3711,11 +3711,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-11-20-argo-rollouts/index.html b/blog/2022-11-20-argo-rollouts/index.html index 70ff75e4e..fc7562fab 100644 --- a/blog/2022-11-20-argo-rollouts/index.html +++ b/blog/2022-11-20-argo-rollouts/index.html @@ -3827,11 +3827,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2022-12-01-multicluster-devsecops/index.html b/blog/2022-12-01-multicluster-devsecops/index.html index 516dc93c6..13023b682 100644 --- a/blog/2022-12-01-multicluster-devsecops/index.html +++ b/blog/2022-12-01-multicluster-devsecops/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2023-11-17-argo-configmanagement-plugins/index.html b/blog/2023-11-17-argo-configmanagement-plugins/index.html index ba11e6619..a143f61e2 100644 --- a/blog/2023-11-17-argo-configmanagement-plugins/index.html +++ b/blog/2023-11-17-argo-configmanagement-plugins/index.html @@ -3709,11 +3709,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2023-12-01-new-pattern-tiers/index.html b/blog/2023-12-01-new-pattern-tiers/index.html index 220b32d52..8e0743fcf 100644 --- a/blog/2023-12-01-new-pattern-tiers/index.html +++ b/blog/2023-12-01-new-pattern-tiers/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2023-12-05-nutanix-testing/index.html b/blog/2023-12-05-nutanix-testing/index.html index 8104479f9..ac598d4dc 100644 --- a/blog/2023-12-05-nutanix-testing/index.html +++ b/blog/2023-12-05-nutanix-testing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2023-12-15-understanding-namespaces/index.html b/blog/2023-12-15-understanding-namespaces/index.html index 200749eee..a119f82cc 100644 --- a/blog/2023-12-15-understanding-namespaces/index.html +++ b/blog/2023-12-15-understanding-namespaces/index.html @@ -3710,11 +3710,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2023-12-20-private-repos/index.html b/blog/2023-12-20-private-repos/index.html index 65e61523e..101d91e30 100644 --- a/blog/2023-12-20-private-repos/index.html +++ b/blog/2023-12-20-private-repos/index.html @@ -3667,11 +3667,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-01-16-deploying-mcg-with-cisco-flashstack-portworx/index.html b/blog/2024-01-16-deploying-mcg-with-cisco-flashstack-portworx/index.html index 7fbb97f27..f4e926e46 100644 --- a/blog/2024-01-16-deploying-mcg-with-cisco-flashstack-portworx/index.html +++ b/blog/2024-01-16-deploying-mcg-with-cisco-flashstack-portworx/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-01-26-more-secrets-options/index.html b/blog/2024-01-26-more-secrets-options/index.html index b6e8d1784..d16cee656 100644 --- a/blog/2024-01-26-more-secrets-options/index.html +++ b/blog/2024-01-26-more-secrets-options/index.html @@ -3766,11 +3766,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-02-07-hcp-htpasswd-config/index.html b/blog/2024-02-07-hcp-htpasswd-config/index.html index f78e7996a..f0867c24f 100644 --- a/blog/2024-02-07-hcp-htpasswd-config/index.html +++ b/blog/2024-02-07-hcp-htpasswd-config/index.html @@ -3653,11 +3653,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-03-05-intel-accelerated-patterns/index.html b/blog/2024-03-05-intel-accelerated-patterns/index.html index f66292dab..e0862b758 100644 --- a/blog/2024-03-05-intel-accelerated-patterns/index.html +++ b/blog/2024-03-05-intel-accelerated-patterns/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-07-12-in-cluster-git/index.html b/blog/2024-07-12-in-cluster-git/index.html index 8249e2e9b..7d4f25bc8 100644 --- a/blog/2024-07-12-in-cluster-git/index.html +++ b/blog/2024-07-12-in-cluster-git/index.html @@ -3663,11 +3663,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-07-19-write-token-kubeconfig/index.html b/blog/2024-07-19-write-token-kubeconfig/index.html index 799c02392..471602d1e 100644 --- a/blog/2024-07-19-write-token-kubeconfig/index.html +++ b/blog/2024-07-19-write-token-kubeconfig/index.html @@ -3636,11 +3636,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-08-30-push-secrets/index.html b/blog/2024-08-30-push-secrets/index.html index a62bbb2f9..6ffdb95a0 100644 --- a/blog/2024-08-30-push-secrets/index.html +++ b/blog/2024-08-30-push-secrets/index.html @@ -3669,11 +3669,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-09-13-using-hypershift/index.html b/blog/2024-09-13-using-hypershift/index.html index 1999c06c6..d3e5b1d1b 100644 --- a/blog/2024-09-13-using-hypershift/index.html +++ b/blog/2024-09-13-using-hypershift/index.html @@ -3687,11 +3687,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-09-26-slimming-of-common/index.html b/blog/2024-09-26-slimming-of-common/index.html index d01333e14..f77b477c1 100644 --- a/blog/2024-09-26-slimming-of-common/index.html +++ b/blog/2024-09-26-slimming-of-common/index.html @@ -3685,11 +3685,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/2024-10-12-disconnected/index.html b/blog/2024-10-12-disconnected/index.html index c074e37c3..25820f7f6 100644 --- a/blog/2024-10-12-disconnected/index.html +++ b/blog/2024-10-12-disconnected/index.html @@ -3723,11 +3723,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. 
diff --git a/blog/2024-11-07-clustergroup-sequencing/index.html b/blog/2024-11-07-clustergroup-sequencing/index.html index f554170f8..ed9cb64b4 100644 --- a/blog/2024-11-07-clustergroup-sequencing/index.html +++ b/blog/2024-11-07-clustergroup-sequencing/index.html @@ -56,17 +56,11 @@ metadata: annotations: argocd.argoproj.io/sync-wave: "10" - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} - creationTimestamp: "2024-11-07T14:24:31Z" - generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv - resourceVersion: "46763" - uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic @@ -79,17 +73,11 @@ metadata: annotations: argocd.argoproj.io/sync-wave: "5" - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} - creationTimestamp: "2024-11-07T14:21:12Z" - generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage - resourceVersion: "56652" - uid: 
2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator @@ -131,16 +119,10 @@ annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\ndo\n oc get sc ocs-storagecluster-ceph-rbd \u0026\u0026 break\n echo \"sc ocs-storagecluster-ceph-rbd not found, waiting...\"\n sleep 5\ndone\necho \"sc ocs-storagecluster-ceph-rbd found, exiting...\"\nexit 0\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} - creationTimestamp: "2024-11-07T16:27:26Z" - generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators - resourceVersion: "201283" - uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed @@ -3845,11 +3827,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/index.html b/blog/index.html index 90f080922..135ddaf39 100644 --- a/blog/index.html +++ b/blog/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/page/2/index.html b/blog/page/2/index.html index 6f139bb6d..6165c0f06 100644 --- a/blog/page/2/index.html +++ b/blog/page/2/index.html @@ -3626,11 +3626,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/page/3/index.html b/blog/page/3/index.html index ed67170d2..34dd2f7af 100644 --- a/blog/page/3/index.html +++ b/blog/page/3/index.html @@ -3637,11 +3637,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/page/4/index.html b/blog/page/4/index.html index d11909132..b1bd0bd83 100644 --- a/blog/page/4/index.html +++ b/blog/page/4/index.html @@ -3631,11 +3631,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog/page/5/index.html b/blog/page/5/index.html index 509b60654..96f946b0b 100644 --- a/blog/page/5/index.html +++ b/blog/page/5/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/acm/index.html b/blog_tags/acm/index.html index 19841a965..2778ae9bd 100644 --- a/blog_tags/acm/index.html +++ b/blog_tags/acm/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/acs/index.html b/blog_tags/acs/index.html index ca84bf4d1..2765c6dc6 100644 --- a/blog_tags/acs/index.html +++ b/blog_tags/acs/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/announce/index.html b/blog_tags/announce/index.html index 9cb3b5ea0..0f87d7a73 100644 --- a/blog_tags/announce/index.html +++ b/blog_tags/announce/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/ansible-edge-gitops/index.html b/blog_tags/ansible-edge-gitops/index.html index 9dcdea7dc..bb8c73453 100644 --- a/blog_tags/ansible-edge-gitops/index.html +++ b/blog_tags/ansible-edge-gitops/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/devops/index.html b/blog_tags/devops/index.html index c44601ebd..92d86f087 100644 --- a/blog_tags/devops/index.html +++ b/blog_tags/devops/index.html @@ -3634,11 +3634,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/devsecops/index.html b/blog_tags/devsecops/index.html index afc1d9855..d1f4835fb 100644 --- a/blog_tags/devsecops/index.html +++ b/blog_tags/devsecops/index.html @@ -3634,11 +3634,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/git/index.html b/blog_tags/git/index.html index 63da0467b..16be8e096 100644 --- a/blog_tags/git/index.html +++ b/blog_tags/git/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/gitops/index.html b/blog_tags/gitops/index.html index 9678cf99a..47afeabbf 100644 --- a/blog_tags/gitops/index.html +++ b/blog_tags/gitops/index.html @@ -3633,11 +3633,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/how-to/index.html b/blog_tags/how-to/index.html index aab106a1e..1dd1a7272 100644 --- a/blog_tags/how-to/index.html +++ b/blog_tags/how-to/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/index.html b/blog_tags/index.html index fe357597f..614163785 100644 --- a/blog_tags/index.html +++ b/blog_tags/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/medical-diagnosis/index.html b/blog_tags/medical-diagnosis/index.html index a5a6fc9d1..e7a0c94ed 100644 --- a/blog_tags/medical-diagnosis/index.html +++ b/blog_tags/medical-diagnosis/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/multi-cloud-gitops/index.html b/blog_tags/multi-cloud-gitops/index.html index c1baaec4d..137e7e625 100644 --- a/blog_tags/multi-cloud-gitops/index.html +++ b/blog_tags/multi-cloud-gitops/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/nutanix/index.html b/blog_tags/nutanix/index.html index 5d1811c86..a93c60100 100644 --- a/blog_tags/nutanix/index.html +++ b/blog_tags/nutanix/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/openshift-platform-plus/index.html b/blog_tags/openshift-platform-plus/index.html index 010df4320..dbc60d435 100644 --- a/blog_tags/openshift-platform-plus/index.html +++ b/blog_tags/openshift-platform-plus/index.html @@ -3634,11 +3634,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/openshift/index.html b/blog_tags/openshift/index.html index e86d37878..20aab3e7b 100644 --- a/blog_tags/openshift/index.html +++ b/blog_tags/openshift/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/page/2/index.html b/blog_tags/page/2/index.html index 2291955e7..6e1d22757 100644 --- a/blog_tags/page/2/index.html +++ b/blog_tags/page/2/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/page/3/index.html b/blog_tags/page/3/index.html index 84125808b..ea1451310 100644 --- a/blog_tags/page/3/index.html +++ b/blog_tags/page/3/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/page/4/index.html b/blog_tags/page/4/index.html index 54be84158..8d55935f1 100644 --- a/blog_tags/page/4/index.html +++ b/blog_tags/page/4/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/page/5/index.html b/blog_tags/page/5/index.html index 30282bcf2..9f6aafa27 100644 --- a/blog_tags/page/5/index.html +++ b/blog_tags/page/5/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/patterns/index.html b/blog_tags/patterns/index.html index be08f8581..cb06ccc4a 100644 --- a/blog_tags/patterns/index.html +++ b/blog_tags/patterns/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/patterns/page/2/index.html b/blog_tags/patterns/page/2/index.html index 8022c9201..8c45e1fb5 100644 --- a/blog_tags/patterns/page/2/index.html +++ b/blog_tags/patterns/page/2/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/patterns/page/3/index.html b/blog_tags/patterns/page/3/index.html index 810be1427..2cf4360cb 100644 --- a/blog_tags/patterns/page/3/index.html +++ b/blog_tags/patterns/page/3/index.html @@ -3638,11 +3638,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/patterns/page/4/index.html b/blog_tags/patterns/page/4/index.html index 09ce35ae9..b42f8a5e9 100644 --- a/blog_tags/patterns/page/4/index.html +++ b/blog_tags/patterns/page/4/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/pipelines/index.html b/blog_tags/pipelines/index.html index 5b097c36a..1c5d8f3cc 100644 --- a/blog_tags/pipelines/index.html +++ b/blog_tags/pipelines/index.html @@ -3634,11 +3634,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/provisioning/index.html b/blog_tags/provisioning/index.html index d3d8a091a..d3d634dde 100644 --- a/blog_tags/provisioning/index.html +++ b/blog_tags/provisioning/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/quay/index.html b/blog_tags/quay/index.html index 9558522d5..afb17dbf6 100644 --- a/blog_tags/quay/index.html +++ b/blog_tags/quay/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/route/index.html b/blog_tags/route/index.html index 346704410..3e91c0951 100644 --- a/blog_tags/route/index.html +++ b/blog_tags/route/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/secrets/index.html b/blog_tags/secrets/index.html index 6f3d8d54d..35c7bff2d 100644 --- a/blog_tags/secrets/index.html +++ b/blog_tags/secrets/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/security/index.html b/blog_tags/security/index.html index 0bd783bd2..168a25af2 100644 --- a/blog_tags/security/index.html +++ b/blog_tags/security/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/sequencing/index.html b/blog_tags/sequencing/index.html index a0551a58a..c17db1817 100644 --- a/blog_tags/sequencing/index.html +++ b/blog_tags/sequencing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/subscriptions/index.html b/blog_tags/subscriptions/index.html index d9e099022..60b330f18 100644 --- a/blog_tags/subscriptions/index.html +++ b/blog_tags/subscriptions/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/validated-pattern/index.html b/blog_tags/validated-pattern/index.html index 363ca877c..5e4a8a46b 100644 --- a/blog_tags/validated-pattern/index.html +++ b/blog_tags/validated-pattern/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/blog_tags/xray/index.html b/blog_tags/xray/index.html index f03145435..569f5cd52 100644 --- a/blog_tags/xray/index.html +++ b/blog_tags/xray/index.html @@ -3626,11 +3626,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/ci/index.html b/ci/index.html index 0e5f43040..ae06cbd8d 100644 --- a/ci/index.html +++ b/ci/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/ci/internal/index.html b/ci/internal/index.html index 3d87c1e58..e9ca2b1f1 100644 --- a/ci/internal/index.html +++ b/ci/internal/index.html @@ -3626,11 +3626,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/contribute/background-on-pattern-development/index.html b/contribute/background-on-pattern-development/index.html index 88d99f0bd..65f3a7406 100644 --- a/contribute/background-on-pattern-development/index.html +++ b/contribute/background-on-pattern-development/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/contribute/contribute-to-docs/index.html b/contribute/contribute-to-docs/index.html index 0314b83bb..a5c5910c2 100644 --- a/contribute/contribute-to-docs/index.html +++ b/contribute/contribute-to-docs/index.html @@ -3680,11 +3680,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/contribute/creating-a-pattern/index.html b/contribute/creating-a-pattern/index.html index 2251e0966..1c64d5439 100644 --- a/contribute/creating-a-pattern/index.html +++ b/contribute/creating-a-pattern/index.html @@ -3709,11 +3709,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/contribute/extending-a-pattern/index.html b/contribute/extending-a-pattern/index.html index 3bd1aca63..4d4d29148 100644 --- a/contribute/extending-a-pattern/index.html +++ b/contribute/extending-a-pattern/index.html @@ -3726,11 +3726,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/contribute/index.html b/contribute/index.html index 62638c26c..f818c2b36 100644 --- a/contribute/index.html +++ b/contribute/index.html @@ -3621,11 +3621,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/contribute/support-policies/index.html b/contribute/support-policies/index.html index b4481f305..24c8027e3 100644 --- a/contribute/support-policies/index.html +++ b/contribute/support-policies/index.html @@ -3622,11 +3622,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/index.html b/index.html index 6d6314fe8..3ebd89587 100644 --- a/index.html +++ b/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/about-pattern-tiers-types/index.html b/learn/about-pattern-tiers-types/index.html index 99d0ecc15..b3c377797 100644 --- a/learn/about-pattern-tiers-types/index.html +++ b/learn/about-pattern-tiers-types/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/about-validated-patterns/index.html b/learn/about-validated-patterns/index.html index 7ed4268f0..5dc3589d9 100644 --- a/learn/about-validated-patterns/index.html +++ b/learn/about-validated-patterns/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/clustergroup-in-values-files/index.html b/learn/clustergroup-in-values-files/index.html index 4cdbad7dc..0116bf1c7 100644 --- a/learn/clustergroup-in-values-files/index.html +++ b/learn/clustergroup-in-values-files/index.html @@ -3761,11 +3761,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/faq/index.html b/learn/faq/index.html index bcd7164c2..d2fab21de 100644 --- a/learn/faq/index.html +++ b/learn/faq/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/implementation/index.html b/learn/implementation/index.html index a282cd7df..c99b89e80 100644 --- a/learn/implementation/index.html +++ b/learn/implementation/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/importing-a-cluster/index.html b/learn/importing-a-cluster/index.html index 8d855e5ac..a3d83c95e 100644 --- a/learn/importing-a-cluster/index.html +++ b/learn/importing-a-cluster/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/index.html b/learn/index.html index b562f884b..faf12fd2c 100644 --- a/learn/index.html +++ b/learn/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/infrastructure/index.html b/learn/infrastructure/index.html index 536473492..16c35a183 100644 --- a/learn/infrastructure/index.html +++ b/learn/infrastructure/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/keyconcepts/index.html b/learn/keyconcepts/index.html index 6e9b6fcfe..e9511b3d9 100644 --- a/learn/keyconcepts/index.html +++ b/learn/keyconcepts/index.html @@ -3635,11 +3635,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/maintained/index.html b/learn/maintained/index.html index 456a6f335..0f90fa64c 100644 --- a/learn/maintained/index.html +++ b/learn/maintained/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/ocp-cluster-general-sizing/index.html b/learn/ocp-cluster-general-sizing/index.html index d9b58c957..b23954bbb 100644 --- a/learn/ocp-cluster-general-sizing/index.html +++ b/learn/ocp-cluster-general-sizing/index.html @@ -3631,11 +3631,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/page/2/index.html b/learn/page/2/index.html index 3744e7982..837482365 100644 --- a/learn/page/2/index.html +++ b/learn/page/2/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/page/3/index.html b/learn/page/3/index.html index e2f2486e3..7702a46af 100644 --- a/learn/page/3/index.html +++ b/learn/page/3/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/page/4/index.html b/learn/page/4/index.html index 0cc688a2f..caca0a308 100644 --- a/learn/page/4/index.html +++ b/learn/page/4/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/page/5/index.html b/learn/page/5/index.html index 13e3ebfbf..833aa6514 100644 --- a/learn/page/5/index.html +++ b/learn/page/5/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/quickstart/index.html b/learn/quickstart/index.html index 79da4dfd1..9ff4c2f83 100644 --- a/learn/quickstart/index.html +++ b/learn/quickstart/index.html @@ -3638,11 +3638,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/sandbox/index.html b/learn/sandbox/index.html index 7b220ddfc..2ff066e7b 100644 --- a/learn/sandbox/index.html +++ b/learn/sandbox/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/secrets-management-in-the-validated-patterns-framework/index.html b/learn/secrets-management-in-the-validated-patterns-framework/index.html index 57eba4797..81a2806ba 100644 --- a/learn/secrets-management-in-the-validated-patterns-framework/index.html +++ b/learn/secrets-management-in-the-validated-patterns-framework/index.html @@ -3979,11 +3979,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/secrets/index.html b/learn/secrets/index.html index 435725b83..a0a53d733 100644 --- a/learn/secrets/index.html +++ b/learn/secrets/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/test-artifacts/index.html b/learn/test-artifacts/index.html index eb3063273..c456acdd4 100644 --- a/learn/test-artifacts/index.html +++ b/learn/test-artifacts/index.html @@ -3677,11 +3677,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/tested/index.html b/learn/tested/index.html index 6d0a29c8f..39593fdea 100644 --- a/learn/tested/index.html +++ b/learn/tested/index.html @@ -3631,11 +3631,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/using-validated-pattern-operator/index.html b/learn/using-validated-pattern-operator/index.html index c470dec4a..cbd2daa92 100644 --- a/learn/using-validated-pattern-operator/index.html +++ b/learn/using-validated-pattern-operator/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/validated_patterns_frameworks/index.html b/learn/validated_patterns_frameworks/index.html index eb9aee59b..8fa324975 100644 --- a/learn/validated_patterns_frameworks/index.html +++ b/learn/validated_patterns_frameworks/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/values-files/index.html b/learn/values-files/index.html index 0de518d29..f374a228e 100644 --- a/learn/values-files/index.html +++ b/learn/values-files/index.html @@ -3642,11 +3642,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/vault/index.html b/learn/vault/index.html index 97f96be0c..520e887be 100644 --- a/learn/vault/index.html +++ b/learn/vault/index.html @@ -3653,11 +3653,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/vp_agof/index.html b/learn/vp_agof/index.html index e31208a03..1d0fd6b90 100644 --- a/learn/vp_agof/index.html +++ b/learn/vp_agof/index.html @@ -3672,11 +3672,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/vp_agof_config_controller/index.html b/learn/vp_agof_config_controller/index.html index ed0171b2a..e0cf8ba8f 100644 --- a/learn/vp_agof_config_controller/index.html +++ b/learn/vp_agof_config_controller/index.html @@ -3713,11 +3713,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/vp_openshift_framework/index.html b/learn/vp_openshift_framework/index.html index e0cabbaeb..7f3b589f8 100644 --- a/learn/vp_openshift_framework/index.html +++ b/learn/vp_openshift_framework/index.html @@ -3781,11 +3781,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/vp_structure_vp_pattern/index.html b/learn/vp_structure_vp_pattern/index.html index 25034ff99..2e53d13b1 100644 --- a/learn/vp_structure_vp_pattern/index.html +++ b/learn/vp_structure_vp_pattern/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/learn/workflow/index.html b/learn/workflow/index.html index af175930e..9aa622966 100644 --- a/learn/workflow/index.html +++ b/learn/workflow/index.html @@ -3649,11 +3649,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/ansible-automation-platform/index.html b/patterns/ansible-edge-gitops-kasten/ansible-automation-platform/index.html index 2cb21b811..5e1724179 100644 --- a/patterns/ansible-edge-gitops-kasten/ansible-automation-platform/index.html +++ b/patterns/ansible-edge-gitops-kasten/ansible-automation-platform/index.html @@ -3658,11 +3658,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/cluster-sizing/index.html b/patterns/ansible-edge-gitops-kasten/cluster-sizing/index.html index 4c1b838bf..4a00f0cfc 100644 --- a/patterns/ansible-edge-gitops-kasten/cluster-sizing/index.html +++ b/patterns/ansible-edge-gitops-kasten/cluster-sizing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/getting-started/index.html b/patterns/ansible-edge-gitops-kasten/getting-started/index.html index b2565a40b..6b5231967 100644 --- a/patterns/ansible-edge-gitops-kasten/getting-started/index.html +++ b/patterns/ansible-edge-gitops-kasten/getting-started/index.html @@ -3718,11 +3718,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/ideas-for-customization/index.html b/patterns/ansible-edge-gitops-kasten/ideas-for-customization/index.html index 6d73a8404..6e174420d 100644 --- a/patterns/ansible-edge-gitops-kasten/ideas-for-customization/index.html +++ b/patterns/ansible-edge-gitops-kasten/ideas-for-customization/index.html @@ -3818,11 +3818,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/index.html b/patterns/ansible-edge-gitops-kasten/index.html index 38117d063..1b771f18d 100644 --- a/patterns/ansible-edge-gitops-kasten/index.html +++ b/patterns/ansible-edge-gitops-kasten/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/installation-details/index.html b/patterns/ansible-edge-gitops-kasten/installation-details/index.html index fa9077b92..08a392bba 100644 --- a/patterns/ansible-edge-gitops-kasten/installation-details/index.html +++ b/patterns/ansible-edge-gitops-kasten/installation-details/index.html @@ -3632,11 +3632,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/openshift-virtualization/index.html b/patterns/ansible-edge-gitops-kasten/openshift-virtualization/index.html index e7a5b7ae2..7842f4ba8 100644 --- a/patterns/ansible-edge-gitops-kasten/openshift-virtualization/index.html +++ b/patterns/ansible-edge-gitops-kasten/openshift-virtualization/index.html @@ -3830,11 +3830,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/troubleshooting/index.html b/patterns/ansible-edge-gitops-kasten/troubleshooting/index.html index 2e5aeb6bd..50390c45d 100644 --- a/patterns/ansible-edge-gitops-kasten/troubleshooting/index.html +++ b/patterns/ansible-edge-gitops-kasten/troubleshooting/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops-kasten/veeam-kasten/index.html b/patterns/ansible-edge-gitops-kasten/veeam-kasten/index.html index de6a4e5b4..d635fb801 100644 --- a/patterns/ansible-edge-gitops-kasten/veeam-kasten/index.html +++ b/patterns/ansible-edge-gitops-kasten/veeam-kasten/index.html @@ -3633,11 +3633,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/ansible-automation-platform/index.html b/patterns/ansible-edge-gitops/ansible-automation-platform/index.html index 0a3d1fcba..6af526e7f 100644 --- a/patterns/ansible-edge-gitops/ansible-automation-platform/index.html +++ b/patterns/ansible-edge-gitops/ansible-automation-platform/index.html @@ -3658,11 +3658,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/cluster-sizing/index.html b/patterns/ansible-edge-gitops/cluster-sizing/index.html index cb57b0f2d..1ba1417db 100644 --- a/patterns/ansible-edge-gitops/cluster-sizing/index.html +++ b/patterns/ansible-edge-gitops/cluster-sizing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/getting-started/index.html b/patterns/ansible-edge-gitops/getting-started/index.html index a72d9f641..cb0903919 100644 --- a/patterns/ansible-edge-gitops/getting-started/index.html +++ b/patterns/ansible-edge-gitops/getting-started/index.html @@ -3689,11 +3689,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/ideas-for-customization/index.html b/patterns/ansible-edge-gitops/ideas-for-customization/index.html index 52663400b..23f8d335e 100644 --- a/patterns/ansible-edge-gitops/ideas-for-customization/index.html +++ b/patterns/ansible-edge-gitops/ideas-for-customization/index.html @@ -3818,11 +3818,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/index.html b/patterns/ansible-edge-gitops/index.html index b62179933..49d864f13 100644 --- a/patterns/ansible-edge-gitops/index.html +++ b/patterns/ansible-edge-gitops/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/installation-details/index.html b/patterns/ansible-edge-gitops/installation-details/index.html index 6a85be956..7e88df48b 100644 --- a/patterns/ansible-edge-gitops/installation-details/index.html +++ b/patterns/ansible-edge-gitops/installation-details/index.html @@ -3632,11 +3632,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/openshift-virtualization/index.html b/patterns/ansible-edge-gitops/openshift-virtualization/index.html index b70c9a60e..a4320a988 100644 --- a/patterns/ansible-edge-gitops/openshift-virtualization/index.html +++ b/patterns/ansible-edge-gitops/openshift-virtualization/index.html @@ -3830,11 +3830,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-edge-gitops/troubleshooting/index.html b/patterns/ansible-edge-gitops/troubleshooting/index.html index 653c479dd..6e73de1f1 100644 --- a/patterns/ansible-edge-gitops/troubleshooting/index.html +++ b/patterns/ansible-edge-gitops/troubleshooting/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/ansible-gitops-framework/index.html b/patterns/ansible-gitops-framework/index.html index 5dbf26c92..caa2e46e9 100644 --- a/patterns/ansible-gitops-framework/index.html +++ b/patterns/ansible-gitops-framework/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/cockroachdb/index.html b/patterns/cockroachdb/index.html index 414f764f8..7dcd2a719 100644 --- a/patterns/cockroachdb/index.html +++ b/patterns/cockroachdb/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/connected-vehicle-architecture/index.html b/patterns/connected-vehicle-architecture/index.html index 24cd598c2..49d5e0dbc 100644 --- a/patterns/connected-vehicle-architecture/index.html +++ b/patterns/connected-vehicle-architecture/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/cluster-sizing/index.html b/patterns/devsecops/cluster-sizing/index.html index c9bc59e73..6292205cf 100644 --- a/patterns/devsecops/cluster-sizing/index.html +++ b/patterns/devsecops/cluster-sizing/index.html @@ -3631,11 +3631,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s sync-wave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/devel-cluster/index.html b/patterns/devsecops/devel-cluster/index.html index 2bf6ef4b8..ece931741 100644 --- a/patterns/devsecops/devel-cluster/index.html +++ b/patterns/devsecops/devel-cluster/index.html @@ -3638,11 +3638,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s sync-wave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/getting-started/index.html b/patterns/devsecops/getting-started/index.html index f829dd23d..b199b87de 100644 --- a/patterns/devsecops/getting-started/index.html +++ b/patterns/devsecops/getting-started/index.html @@ -3664,11 +3664,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s sync-wave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/ideas-for-customization/index.html b/patterns/devsecops/ideas-for-customization/index.html index c6d00be16..d01277a7f 100644 --- a/patterns/devsecops/ideas-for-customization/index.html +++ b/patterns/devsecops/ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/index.html b/patterns/devsecops/index.html index fa2aa4cc3..05d7e3d46 100644 --- a/patterns/devsecops/index.html +++ b/patterns/devsecops/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/production-cluster/index.html b/patterns/devsecops/production-cluster/index.html index c8fdac02a..fedcc80c8 100644 --- a/patterns/devsecops/production-cluster/index.html +++ b/patterns/devsecops/production-cluster/index.html @@ -3636,11 +3636,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/devsecops/secure-supply-chain-demo/index.html b/patterns/devsecops/secure-supply-chain-demo/index.html index 590e94969..c6bb39bfc 100644 --- a/patterns/devsecops/secure-supply-chain-demo/index.html +++ b/patterns/devsecops/secure-supply-chain-demo/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/emerging-disease-detection/edd-getting-started/index.html b/patterns/emerging-disease-detection/edd-getting-started/index.html index aa1e593f6..f32b2eb1c 100644 --- a/patterns/emerging-disease-detection/edd-getting-started/index.html +++ b/patterns/emerging-disease-detection/edd-getting-started/index.html @@ -3684,11 +3684,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/emerging-disease-detection/index.html b/patterns/emerging-disease-detection/index.html index 826e52ab4..f382b20a2 100644 --- a/patterns/emerging-disease-detection/index.html +++ b/patterns/emerging-disease-detection/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-getting-started/index.html b/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-getting-started/index.html index 999f1a7dc..977e5e098 100644 --- a/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-getting-started/index.html +++ b/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-getting-started/index.html @@ -3668,11 +3668,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-required-hardware/index.html b/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-required-hardware/index.html index 813cb9685..687aa97e8 100644 --- a/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-required-hardware/index.html +++ b/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-required-hardware/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-troubleshooting/index.html b/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-troubleshooting/index.html index 6312cb4be..d62ed21c0 100644 --- a/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-troubleshooting/index.html +++ b/patterns/gaudi-rag-chat-qna/gaudi-rag-chat-qna-troubleshooting/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/gaudi-rag-chat-qna/index.html b/patterns/gaudi-rag-chat-qna/index.html index a4a174ea8..f07f05db5 100644 --- a/patterns/gaudi-rag-chat-qna/index.html +++ b/patterns/gaudi-rag-chat-qna/index.html @@ -3627,11 +3627,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/hypershift/index.html b/patterns/hypershift/index.html index 7ddeb56aa..d0b8ed5fe 100644 --- a/patterns/hypershift/index.html +++ b/patterns/hypershift/index.html @@ -3626,11 +3626,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/index.html b/patterns/index.html index bbf4525f1..cffb8410b 100644 --- a/patterns/index.html +++ b/patterns/index.html @@ -3726,11 +3726,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/application/index.html b/patterns/industrial-edge/application/index.html index 91521227a..f71e2271a 100644 --- a/patterns/industrial-edge/application/index.html +++ b/patterns/industrial-edge/application/index.html @@ -3632,11 +3632,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/cluster-sizing/index.html b/patterns/industrial-edge/cluster-sizing/index.html index 31c6ad9eb..740b009f0 100644 --- a/patterns/industrial-edge/cluster-sizing/index.html +++ b/patterns/industrial-edge/cluster-sizing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/demo-script/index.html b/patterns/industrial-edge/demo-script/index.html index f8f862ce9..b7fb7d5bd 100644 --- a/patterns/industrial-edge/demo-script/index.html +++ b/patterns/industrial-edge/demo-script/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/factory/index.html b/patterns/industrial-edge/factory/index.html index 65a89c75f..318d84c67 100644 --- a/patterns/industrial-edge/factory/index.html +++ b/patterns/industrial-edge/factory/index.html @@ -3638,11 +3638,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/getting-started/index.html b/patterns/industrial-edge/getting-started/index.html index fc7dc1fd7..fa7b64b8f 100644 --- a/patterns/industrial-edge/getting-started/index.html +++ b/patterns/industrial-edge/getting-started/index.html @@ -3731,11 +3731,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/ideas-for-customization/index.html b/patterns/industrial-edge/ideas-for-customization/index.html index 7ad948cb2..24d242833 100644 --- a/patterns/industrial-edge/ideas-for-customization/index.html +++ b/patterns/industrial-edge/ideas-for-customization/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/index.html b/patterns/industrial-edge/index.html index 02d17498e..a0042b8e7 100644 --- a/patterns/industrial-edge/index.html +++ b/patterns/industrial-edge/index.html @@ -3631,11 +3631,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/industrial-edge/troubleshooting/index.html b/patterns/industrial-edge/troubleshooting/index.html index c4c268251..677600f89 100644 --- a/patterns/industrial-edge/troubleshooting/index.html +++ b/patterns/industrial-edge/troubleshooting/index.html @@ -3644,11 +3644,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/kong-gateway/index.html b/patterns/kong-gateway/index.html index 8c547cad3..c2c6473f0 100644 --- a/patterns/kong-gateway/index.html +++ b/patterns/kong-gateway/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis-amx/cluster-sizing/index.html b/patterns/medical-diagnosis-amx/cluster-sizing/index.html index 8b53e14d0..bb883047a 100644 --- a/patterns/medical-diagnosis-amx/cluster-sizing/index.html +++ b/patterns/medical-diagnosis-amx/cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis-amx/getting-started/index.html b/patterns/medical-diagnosis-amx/getting-started/index.html index ee6e83d91..3ec282ed9 100644 --- a/patterns/medical-diagnosis-amx/getting-started/index.html +++ b/patterns/medical-diagnosis-amx/getting-started/index.html @@ -3704,11 +3704,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis-amx/ideas-for-customization/index.html b/patterns/medical-diagnosis-amx/ideas-for-customization/index.html index c5f303e56..793c32dd0 100644 --- a/patterns/medical-diagnosis-amx/ideas-for-customization/index.html +++ b/patterns/medical-diagnosis-amx/ideas-for-customization/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis-amx/index.html b/patterns/medical-diagnosis-amx/index.html index a111924cc..740fd24cb 100644 --- a/patterns/medical-diagnosis-amx/index.html +++ b/patterns/medical-diagnosis-amx/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis-amx/troubleshooting/index.html b/patterns/medical-diagnosis-amx/troubleshooting/index.html index 55ecfd7aa..965eac9fb 100644 --- a/patterns/medical-diagnosis-amx/troubleshooting/index.html +++ b/patterns/medical-diagnosis-amx/troubleshooting/index.html @@ -3655,11 +3655,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis/cluster-sizing/index.html b/patterns/medical-diagnosis/cluster-sizing/index.html index 9f3e24c67..3afe39200 100644 --- a/patterns/medical-diagnosis/cluster-sizing/index.html +++ b/patterns/medical-diagnosis/cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis/demo-script/index.html b/patterns/medical-diagnosis/demo-script/index.html index 6b96b4368..71cc20482 100644 --- a/patterns/medical-diagnosis/demo-script/index.html +++ b/patterns/medical-diagnosis/demo-script/index.html @@ -3633,11 +3633,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis/getting-started/index.html b/patterns/medical-diagnosis/getting-started/index.html index 9f38d9806..625fe5ad7 100644 --- a/patterns/medical-diagnosis/getting-started/index.html +++ b/patterns/medical-diagnosis/getting-started/index.html @@ -3697,11 +3697,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis/ideas-for-customization/index.html b/patterns/medical-diagnosis/ideas-for-customization/index.html index feab3aae3..a02304a52 100644 --- a/patterns/medical-diagnosis/ideas-for-customization/index.html +++ b/patterns/medical-diagnosis/ideas-for-customization/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis/index.html b/patterns/medical-diagnosis/index.html index 07651154a..ac7611045 100644 --- a/patterns/medical-diagnosis/index.html +++ b/patterns/medical-diagnosis/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/medical-diagnosis/troubleshooting/index.html b/patterns/medical-diagnosis/troubleshooting/index.html index 4286fb5aa..018c02de4 100644 --- a/patterns/medical-diagnosis/troubleshooting/index.html +++ b/patterns/medical-diagnosis/troubleshooting/index.html @@ -3655,11 +3655,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/mlops-fraud-detection/index.html b/patterns/mlops-fraud-detection/index.html index 7dbe1c56f..575321405 100644 --- a/patterns/mlops-fraud-detection/index.html +++ b/patterns/mlops-fraud-detection/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/mlops-fraud-detection/mfd-getting-started/index.html b/patterns/mlops-fraud-detection/mfd-getting-started/index.html index c2dd62578..538424a7d 100644 --- a/patterns/mlops-fraud-detection/mfd-getting-started/index.html +++ b/patterns/mlops-fraud-detection/mfd-getting-started/index.html @@ -3657,11 +3657,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/mlops-fraud-detection/mfd-running-the-demo/index.html b/patterns/mlops-fraud-detection/mfd-running-the-demo/index.html index 78e91d2ef..c721c7ea7 100644 --- a/patterns/mlops-fraud-detection/mfd-running-the-demo/index.html +++ b/patterns/mlops-fraud-detection/mfd-running-the-demo/index.html @@ -3704,11 +3704,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/index.html b/patterns/multicloud-gitops-amx-rhoai/index.html index d7f36d61c..8e398a0f6 100644 --- a/patterns/multicloud-gitops-amx-rhoai/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup.
ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-bert-script/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-bert-script/index.html index 2430a44cf..5489133f2 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-bert-script/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-bert-script/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages.
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-cluster-sizing/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-cluster-sizing/index.html index c3de228ab..de83f9331 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-cluster-sizing/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages.
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-demo-script/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-demo-script/index.html index 287c1fb8d..c6c48122e 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-demo-script/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-demo-script/index.html @@ -3644,11 +3644,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-getting-started/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-getting-started/index.html index d803ce751..f2b998e98 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-getting-started/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-getting-started/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-ideas-for-customization/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-ideas-for-customization/index.html index b569442bb..2437aa25c 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-ideas-for-customization/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-imperative-actions/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-imperative-actions/index.html index a76aeae3e..026e90f49 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-imperative-actions/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-imperative-actions/index.html @@ -3662,11 +3662,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-managed-cluster/index.html b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-managed-cluster/index.html index 2832fad3d..1608851c5 100644 --- a/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-managed-cluster/index.html +++ b/patterns/multicloud-gitops-amx-rhoai/mcg-amx-rhoai-managed-cluster/index.html @@ -3651,11 +3651,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx/index.html b/patterns/multicloud-gitops-amx/index.html index 48049d714..d5f3d8053 100644 --- a/patterns/multicloud-gitops-amx/index.html +++ b/patterns/multicloud-gitops-amx/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx/mcg-amx-cluster-sizing/index.html b/patterns/multicloud-gitops-amx/mcg-amx-cluster-sizing/index.html index 20cb24b1a..eb6a110ba 100644 --- a/patterns/multicloud-gitops-amx/mcg-amx-cluster-sizing/index.html +++ b/patterns/multicloud-gitops-amx/mcg-amx-cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx/mcg-amx-getting-started/index.html b/patterns/multicloud-gitops-amx/mcg-amx-getting-started/index.html index edcf32979..054f50cab 100644 --- a/patterns/multicloud-gitops-amx/mcg-amx-getting-started/index.html +++ b/patterns/multicloud-gitops-amx/mcg-amx-getting-started/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx/mcg-amx-ideas-for-customization/index.html b/patterns/multicloud-gitops-amx/mcg-amx-ideas-for-customization/index.html index 53051b276..f5118b9c3 100644 --- a/patterns/multicloud-gitops-amx/mcg-amx-ideas-for-customization/index.html +++ b/patterns/multicloud-gitops-amx/mcg-amx-ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx/mcg-amx-imperative-actions/index.html b/patterns/multicloud-gitops-amx/mcg-amx-imperative-actions/index.html index 5df0c7d42..7859b63c9 100644 --- a/patterns/multicloud-gitops-amx/mcg-amx-imperative-actions/index.html +++ b/patterns/multicloud-gitops-amx/mcg-amx-imperative-actions/index.html @@ -3662,11 +3662,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-amx/mcg-amx-managed-cluster/index.html b/patterns/multicloud-gitops-amx/mcg-amx-managed-cluster/index.html index 124ed6457..3f4f01ffb 100644 --- a/patterns/multicloud-gitops-amx/mcg-amx-managed-cluster/index.html +++ b/patterns/multicloud-gitops-amx/mcg-amx-managed-cluster/index.html @@ -3651,11 +3651,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-portworx/cluster-sizing/index.html b/patterns/multicloud-gitops-portworx/cluster-sizing/index.html index c8c26d3bc..e1540d3e9 100644 --- a/patterns/multicloud-gitops-portworx/cluster-sizing/index.html +++ b/patterns/multicloud-gitops-portworx/cluster-sizing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-portworx/getting-started/index.html b/patterns/multicloud-gitops-portworx/getting-started/index.html index 8c967b55c..35f6b69b5 100644 --- a/patterns/multicloud-gitops-portworx/getting-started/index.html +++ b/patterns/multicloud-gitops-portworx/getting-started/index.html @@ -3644,11 +3644,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-portworx/ideas-for-customization/index.html b/patterns/multicloud-gitops-portworx/ideas-for-customization/index.html index f0808fc9d..3d5b6e09b 100644 --- a/patterns/multicloud-gitops-portworx/ideas-for-customization/index.html +++ b/patterns/multicloud-gitops-portworx/ideas-for-customization/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-portworx/index.html b/patterns/multicloud-gitops-portworx/index.html index 231eb41f8..a6acf6441 100644 --- a/patterns/multicloud-gitops-portworx/index.html +++ b/patterns/multicloud-gitops-portworx/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-portworx/managed-cluster/index.html b/patterns/multicloud-gitops-portworx/managed-cluster/index.html index b66df094f..8b455e0e6 100644 --- a/patterns/multicloud-gitops-portworx/managed-cluster/index.html +++ b/patterns/multicloud-gitops-portworx/managed-cluster/index.html @@ -3659,11 +3659,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-qat/index.html b/patterns/multicloud-gitops-qat/index.html index 5f66a86e6..7ce791def 100644 --- a/patterns/multicloud-gitops-qat/index.html +++ b/patterns/multicloud-gitops-qat/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-qat/mcg-qat-cluster-sizing/index.html b/patterns/multicloud-gitops-qat/mcg-qat-cluster-sizing/index.html index 1a5f2ea5c..6fac9776b 100644 --- a/patterns/multicloud-gitops-qat/mcg-qat-cluster-sizing/index.html +++ b/patterns/multicloud-gitops-qat/mcg-qat-cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-qat/mcg-qat-getting-started/index.html b/patterns/multicloud-gitops-qat/mcg-qat-getting-started/index.html index a0dc2f333..12d54bc03 100644 --- a/patterns/multicloud-gitops-qat/mcg-qat-getting-started/index.html +++ b/patterns/multicloud-gitops-qat/mcg-qat-getting-started/index.html @@ -3643,11 +3643,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-qat/mcg-qat-ideas-for-customization/index.html b/patterns/multicloud-gitops-qat/mcg-qat-ideas-for-customization/index.html index d79b5a30c..491007e18 100644 --- a/patterns/multicloud-gitops-qat/mcg-qat-ideas-for-customization/index.html +++ b/patterns/multicloud-gitops-qat/mcg-qat-ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-qat/mcg-qat-imperative-actions/index.html b/patterns/multicloud-gitops-qat/mcg-qat-imperative-actions/index.html index f667ac117..6fec5dc5e 100644 --- a/patterns/multicloud-gitops-qat/mcg-qat-imperative-actions/index.html +++ b/patterns/multicloud-gitops-qat/mcg-qat-imperative-actions/index.html @@ -3662,11 +3662,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-qat/mcg-qat-managed-cluster/index.html b/patterns/multicloud-gitops-qat/mcg-qat-managed-cluster/index.html index 9f8064ee9..9026fa937 100644 --- a/patterns/multicloud-gitops-qat/mcg-qat-managed-cluster/index.html +++ b/patterns/multicloud-gitops-qat/mcg-qat-managed-cluster/index.html @@ -3651,11 +3651,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/index.html b/patterns/multicloud-gitops-sgx-hello-world/index.html index 8a57c916a..06e8ebfa7 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-cluster-sizing/index.html b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-cluster-sizing/index.html index 913bef273..cc106aac4 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-cluster-sizing/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-demo-script/index.html b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-demo-script/index.html index 8d0384bee..ede1afe7c 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-demo-script/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-demo-script/index.html @@ -3644,11 +3644,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-getting-started/index.html b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-getting-started/index.html index 329d922d5..95fc7c402 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-getting-started/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-getting-started/index.html @@ -3630,11 +3630,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-ideas-for-customization/index.html b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-ideas-for-customization/index.html index 4817ad13c..b575ba54a 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-ideas-for-customization/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-imperative-actions/index.html b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-imperative-actions/index.html index 48b1d2371..85c767dce 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-imperative-actions/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-imperative-actions/index.html @@ -3662,11 +3662,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-managed-cluster/index.html b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-managed-cluster/index.html index 3fccc799c..8870ac55f 100644 --- a/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-managed-cluster/index.html +++ b/patterns/multicloud-gitops-sgx-hello-world/mcg-sgx-hello-world-managed-cluster/index.html @@ -3647,11 +3647,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/index.html b/patterns/multicloud-gitops-sgx/index.html index 9b13ffa15..b249b0310 100644 --- a/patterns/multicloud-gitops-sgx/index.html +++ b/patterns/multicloud-gitops-sgx/index.html @@ -3659,11 +3659,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/mcg-sgx-cluster-sizing/index.html b/patterns/multicloud-gitops-sgx/mcg-sgx-cluster-sizing/index.html index 0080cc17c..232a17309 100644 --- a/patterns/multicloud-gitops-sgx/mcg-sgx-cluster-sizing/index.html +++ b/patterns/multicloud-gitops-sgx/mcg-sgx-cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/mcg-sgx-demo-script/index.html b/patterns/multicloud-gitops-sgx/mcg-sgx-demo-script/index.html index 28d0cb131..666e38c1a 100644 --- a/patterns/multicloud-gitops-sgx/mcg-sgx-demo-script/index.html +++ b/patterns/multicloud-gitops-sgx/mcg-sgx-demo-script/index.html @@ -3644,11 +3644,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/mcg-sgx-getting-started/index.html b/patterns/multicloud-gitops-sgx/mcg-sgx-getting-started/index.html index 448aedc11..6c3fc3cbc 100644 --- a/patterns/multicloud-gitops-sgx/mcg-sgx-getting-started/index.html +++ b/patterns/multicloud-gitops-sgx/mcg-sgx-getting-started/index.html @@ -3715,11 +3715,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/mcg-sgx-ideas-for-customization/index.html b/patterns/multicloud-gitops-sgx/mcg-sgx-ideas-for-customization/index.html index a55b13a5d..74f6ec1b1 100644 --- a/patterns/multicloud-gitops-sgx/mcg-sgx-ideas-for-customization/index.html +++ b/patterns/multicloud-gitops-sgx/mcg-sgx-ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/mcg-sgx-imperative-actions/index.html b/patterns/multicloud-gitops-sgx/mcg-sgx-imperative-actions/index.html index 601c1eaa9..69c4892cf 100644 --- a/patterns/multicloud-gitops-sgx/mcg-sgx-imperative-actions/index.html +++ b/patterns/multicloud-gitops-sgx/mcg-sgx-imperative-actions/index.html @@ -3662,11 +3662,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops-sgx/mcg-sgx-managed-cluster/index.html b/patterns/multicloud-gitops-sgx/mcg-sgx-managed-cluster/index.html index 312808cd2..9c3c15134 100644 --- a/patterns/multicloud-gitops-sgx/mcg-sgx-managed-cluster/index.html +++ b/patterns/multicloud-gitops-sgx/mcg-sgx-managed-cluster/index.html @@ -3651,11 +3651,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/index.html b/patterns/multicloud-gitops/index.html index 0516ade2c..9547a5e2c 100644 --- a/patterns/multicloud-gitops/index.html +++ b/patterns/multicloud-gitops/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/mcg-cluster-sizing/index.html b/patterns/multicloud-gitops/mcg-cluster-sizing/index.html index 99b80bb24..344734f30 100644 --- a/patterns/multicloud-gitops/mcg-cluster-sizing/index.html +++ b/patterns/multicloud-gitops/mcg-cluster-sizing/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/mcg-demo-script/index.html b/patterns/multicloud-gitops/mcg-demo-script/index.html index f4b1ddef3..e7dcd3c56 100644 --- a/patterns/multicloud-gitops/mcg-demo-script/index.html +++ b/patterns/multicloud-gitops/mcg-demo-script/index.html @@ -3644,11 +3644,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/mcg-getting-started/index.html b/patterns/multicloud-gitops/mcg-getting-started/index.html index e48baecfd..513a15bc0 100644 --- a/patterns/multicloud-gitops/mcg-getting-started/index.html +++ b/patterns/multicloud-gitops/mcg-getting-started/index.html @@ -3624,11 +3624,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/mcg-ideas-for-customization/index.html b/patterns/multicloud-gitops/mcg-ideas-for-customization/index.html index 8fe73b8d9..7eb6e4e53 100644 --- a/patterns/multicloud-gitops/mcg-ideas-for-customization/index.html +++ b/patterns/multicloud-gitops/mcg-ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/mcg-imperative-actions/index.html b/patterns/multicloud-gitops/mcg-imperative-actions/index.html index 66e85fafa..02fdd46d8 100644 --- a/patterns/multicloud-gitops/mcg-imperative-actions/index.html +++ b/patterns/multicloud-gitops/mcg-imperative-actions/index.html @@ -3662,11 +3662,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/multicloud-gitops/mcg-managed-cluster/index.html b/patterns/multicloud-gitops/mcg-managed-cluster/index.html index 57c364cd1..acb682a05 100644 --- a/patterns/multicloud-gitops/mcg-managed-cluster/index.html +++ b/patterns/multicloud-gitops/mcg-managed-cluster/index.html @@ -3651,11 +3651,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. 
Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/openshift-ai/getting-started/index.html b/patterns/openshift-ai/getting-started/index.html index 9ce3ab142..2a259a1e9 100644 --- a/patterns/openshift-ai/getting-started/index.html +++ b/patterns/openshift-ai/getting-started/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/openshift-ai/index.html b/patterns/openshift-ai/index.html index 69b044cf0..b2af32e60 100644 --- a/patterns/openshift-ai/index.html +++ b/patterns/openshift-ai/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/rag-llm-gitops/getting-started/index.html b/patterns/rag-llm-gitops/getting-started/index.html index b0bf7b721..3370514ad 100644 --- a/patterns/rag-llm-gitops/getting-started/index.html +++ b/patterns/rag-llm-gitops/getting-started/index.html @@ -3665,11 +3665,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/rag-llm-gitops/gpu_provisioning/index.html b/patterns/rag-llm-gitops/gpu_provisioning/index.html index ded34dde0..0f1ff0629 100644 --- a/patterns/rag-llm-gitops/gpu_provisioning/index.html +++ b/patterns/rag-llm-gitops/gpu_provisioning/index.html @@ -3741,11 +3741,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/rag-llm-gitops/index.html b/patterns/rag-llm-gitops/index.html index d051fcd8e..d0b62e93c 100644 --- a/patterns/rag-llm-gitops/index.html +++ b/patterns/rag-llm-gitops/index.html @@ -3632,11 +3632,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/application/index.html b/patterns/retail/application/index.html index a0a48e74b..a6f0bb9f2 100644 --- a/patterns/retail/application/index.html +++ b/patterns/retail/application/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s sync-wave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/cluster-sizing/index.html b/patterns/retail/cluster-sizing/index.html index 0b96814e7..283e85b4a 100644 --- a/patterns/retail/cluster-sizing/index.html +++ b/patterns/retail/cluster-sizing/index.html @@ -3625,11 +3625,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s sync-wave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/components/index.html b/patterns/retail/components/index.html index 4740c67fd..0605d23bb 100644 --- a/patterns/retail/components/index.html +++ b/patterns/retail/components/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergroup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s sync-wave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/getting-started/index.html b/patterns/retail/getting-started/index.html index 8912bced1..598f39151 100644 --- a/patterns/retail/getting-started/index.html +++ b/patterns/retail/getting-started/index.html @@ -3643,11 +3643,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resources in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgoCD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/ideas-for-customization/index.html b/patterns/retail/ideas-for-customization/index.html index 8e16b7723..3414a3017 100644 --- a/patterns/retail/ideas-for-customization/index.html +++ b/patterns/retail/ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/index.html b/patterns/retail/index.html index 1a423c170..665718166 100644 --- a/patterns/retail/index.html +++ b/patterns/retail/index.html @@ -3629,11 +3629,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/store/index.html b/patterns/retail/store/index.html index 4da3594a3..7359a2478 100644 --- a/patterns/retail/store/index.html +++ b/patterns/retail/store/index.html @@ -3638,11 +3638,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/retail/troubleshooting/index.html b/patterns/retail/troubleshooting/index.html index dd5f0dfbd..35960908d 100644 --- a/patterns/retail/troubleshooting/index.html +++ b/patterns/retail/troubleshooting/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/travelops/demo-script/index.html b/patterns/travelops/demo-script/index.html index 47535b66c..1e666351b 100644 --- a/patterns/travelops/demo-script/index.html +++ b/patterns/travelops/demo-script/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/travelops/getting-started/index.html b/patterns/travelops/getting-started/index.html index 178c9ee24..6b48631f5 100644 --- a/patterns/travelops/getting-started/index.html +++ b/patterns/travelops/getting-started/index.html @@ -3647,11 +3647,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/travelops/ideas-for-customization/index.html b/patterns/travelops/ideas-for-customization/index.html index 436e06fd0..a0ae43dd5 100644 --- a/patterns/travelops/ideas-for-customization/index.html +++ b/patterns/travelops/ideas-for-customization/index.html @@ -3623,11 +3623,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. 
ArgoCD uses annotations for Resource Hooks. The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/patterns/travelops/index.html b/patterns/travelops/index.html index fa51c38bf..877a77101 100644 --- a/patterns/travelops/index.html +++ b/patterns/travelops/index.html @@ -3628,11 +3628,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for. diff --git a/search/index.html b/search/index.html index a3be6a235..11d726401 100644 --- a/search/index.html +++ b/search/index.html @@ -3621,11 +3621,11 @@ Sync-Waves and ArgoCD/OpenShift GitOps Resource Hooks The way that resource hooks are designed to work is by giving ordering hints, so that ArgoCD knows what order to apply resources in. The mechanism is described in the ArgoCD upstream docs here. When sync-waves are in use, all resouces in the same sync-wave have to be "healthy" before resources in the numerically next sync-wave are synced. This mechanism gives us a way of having ArgocD help us enforce order with objects that it manages. Solution 1: Sync-Waves for Subscriptions in clusterGroup The Validated Patterns framework now allows Kubernetes annotations to be added directly to subscription objects in the clusterGroup. ArgoCD uses annotations for Resource Hooks. 
The clustergoup chart now passes any annotations attached to subscriptions through to the subscription object(s) that the clustergroup chart creates. For example: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage annotations: argocd.argoproj.io/sync-wave: "5" will result in a subscription object that includes the annotations: -apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"10"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"kubevirt-hyperconverged","namespace":"openshift-cnv"},"spec":{"channel":"stable","installPlanApproval":"Automatic","name":"kubevirt-hyperconverged","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:24:31Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv resourceVersion: "46763" uid: e9b3892c-9383-41ca-9e8f-ae7be82f012f spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"operators.coreos.com/v1alpha1","kind":"Subscription","metadata":{"annotations":{"argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator","namespace":"openshift-storage"},"spec":{"installPlanApproval":"Automatic","name":"odf-operator","source":"redhat-operators","sourceNamespace":"openshift-marketplace"}} creationTimestamp: "2024-11-07T14:21:12Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage resourceVersion: "56652" uid: 2d9f026f-50e6-4fc1-ad11-8a6a2a636017 spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. 
+apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "10" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/kubevirt-hyperconverged.openshift-cnv: "" name: kubevirt-hyperconverged namespace: openshift-cnv spec: channel: stable installPlanApproval: Automatic name: kubevirt-hyperconverged source: redhat-operators sourceNamespace: openshift-marketplace apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: annotations: argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub operators.coreos.com/odf-operator.openshift-storage: "" name: odf-operator namespace: openshift-storage spec: installPlanApproval: Automatic name: odf-operator source: redhat-operators sourceNamespace: openshift-marketplace With this configuration, any objects created with sync-waves lower than "10" must be healthy before the objects in sync-wave "10" sync. In particular, the odf-operator subscription must be healthy before the kubevirt-hyperconverged subscription will sync. Similarly, if we defined objects with higher sync-waves than "10", all the resources with sync-waves higher than "10" will wait until the resources in "10" are healthy. If the subscriptions in question wait until their components are healthy before reporting they are healthy themselves, this might be all you need to do. In the case of this particular issue, it was not enough. But because all sequencing in ArgoCD requires the use of sync-wave annotations, adding the annotation to the subscription object will be necessary for using the other solutions. Solution 2: The sequenceJob attribute for Subscriptions in clusterGroup In this situation, we have a subscription that installs an operator, but it is not enough for just the subscriptions to be in sync-waves. This is because the subscriptions install operators, and it is the action of the operators themselves that we have to sequence. 
In many of these kinds of situations, we can sequence the action by looking for the existence of a single resource. The new sequenceJob construct in subscriptions allows for this kind of relationship by creating a Job at the same sync-wave precedence as the subscription, and looking for the existence of a single arbitrary resource in an arbitrary namespace. The Job then waits for that resource to appear, and when it does, it will be seen as "healthy" and will allow future sync-waves to proceed. In this example, the ODF operator needs to have created a storageclass so that the OCP-Virt operators can use it as virtualization storage. If it does not find the kind of storage it wants, it will use the default storageclass instead, which may lead to inconsistencies in behavior. We can have the Validated Patterns framework create a mostly boilerplate job to look for the needed resource this way: openshift-virtualization: name: kubevirt-hyperconverged namespace: openshift-cnv channel: stable annotations: argocd.argoproj.io/sync-wave: "10" openshift-data-foundation: name: odf-operator namespace: openshift-storage sequenceJob: resourceType: sc resourceName: ocs-storagecluster-ceph-rbd annotations: argocd.argoproj.io/sync-wave: "5" Note the addition of the sequenceJob section in the odf-operator subscription block. 
This structure will result in the following Job being created alongside the subscriptions: -apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"batch/v1","kind":"Job","metadata":{"annotations":{"argocd.argoproj.io/hook":"Sync","argocd.argoproj.io/sync-wave":"5"},"labels":{"app.kubernetes.io/instance":"ansible-edge-gitops-hub"},"name":"odf-operator-sequencejob","namespace":"openshift-operators"},"spec":{"completions":1,"parallelism":1,"template":{"spec":{"containers":[{"command":["/bin/bash","-c","while [ 1 ];\\ndo\\n oc get sc ocs-storagecluster-ceph-rbd \\u0026\\u0026 break\\n echo \\"sc ocs-storagecluster-ceph-rbd not found, waiting...\\"\\n sleep 5\\ndone\\necho \\"sc ocs-storagecluster-ceph-rbd found, exiting...\\"\\nexit 0\\n"],"image":"quay.io/hybridcloudpatterns/imperative-container:v1","name":"odf-operator-sequencejob"}],"restartPolicy":"OnFailure"}}}} creationTimestamp: "2024-11-07T16:27:26Z" generation: 1 labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators resourceVersion: "201283" uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." 
sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. +apiVersion: batch/v1 kind: Job metadata: annotations: argocd.argoproj.io/hook: Sync argocd.argoproj.io/sync-wave: "5" labels: app.kubernetes.io/instance: ansible-edge-gitops-hub name: odf-operator-sequencejob namespace: openshift-operators spec: backoffLimit: 6 completionMode: NonIndexed completions: 1 manualSelector: false parallelism: 1 podReplacementPolicy: TerminatingOrFailed selector: matchLabels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a suspend: false template: metadata: creationTimestamp: null labels: batch.kubernetes.io/controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a batch.kubernetes.io/job-name: odf-operator-sequencejob controller-uid: 3084075d-bc1f-4e23-b44d-a13c5d184a6a job-name: odf-operator-sequencejob spec: containers: - command: - /bin/bash - -c - | while [ 1 ]; do oc get sc ocs-storagecluster-ceph-rbd && break echo "sc ocs-storagecluster-ceph-rbd not found, waiting..." sleep 5 done echo "sc ocs-storagecluster-ceph-rbd found, exiting..." 
exit 0 image: quay.io/hybridcloudpatterns/imperative-container:v1 imagePullPolicy: IfNotPresent name: odf-operator-sequencejob resources: {} terminationMessagePath: /dev/termination-log terminationMessagePolicy: File dnsPolicy: ClusterFirst restartPolicy: OnFailure schedulerName: default-scheduler securityContext: {} terminationGracePeriodSeconds: 30 Since the job is created in sync-wave "5" (which it inherits from the subscription it is attached to by default, though you can specify a different sync-wave if you prefer), this job must complete before sync-wave "10" starts. So the storageclass ocs-storagecluster-ceph-rbd must exist before OCP-Virt starts deploying, ensuring that it will be able to "see" and use that storageclass as its default virtualization storage class. Each subscription is permitted one sequenceJob. Each sequenceJob may have the following attributes: syncWave: Defaults to the subscription’s syncwave from annotations. resourceType: Resource kind for the resource to watch for.