From 7de4ee9eb93f38efcb562827639620a9eb4e2b1b Mon Sep 17 00:00:00 2001 From: "cqu@redhat.com" Date: Fri, 14 Jan 2022 14:50:51 +0800 Subject: [PATCH 001/150] Initial commit --- README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..4d36ce5f9 --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# observability_core_automation \ No newline at end of file From 406f5c34c6555286572ce41e975fb7144af94fee Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 14 Jan 2022 15:46:18 +0800 Subject: [PATCH 002/150] copy 2.4 auto case here Signed-off-by: Chang Liang Qu --- CONTRIBUTING.md | 55 + DCO | 37 + Jenkinsfile | 95 + Jenkinsfile_upgrade | 79 + LICENSE | 201 + Makefile | 58 + OWNERS | 9 + REMEDIATE.md | 5 + SECURITY.md | 4 + cicd-scripts/Configfile | 30 + cicd-scripts/build.sh | 14 + cicd-scripts/copyright-check.sh | 168 + cicd-scripts/customize-mco.sh | 201 + cicd-scripts/deploy-to-cluster.sh | 4 + cicd-scripts/install-dependencies.sh | 17 + cicd-scripts/run-e2e-in-kind-via-prow.sh | 44 + cicd-scripts/run-e2e-tests.sh | 89 + cicd-scripts/run-unit-tests.sh | 9 + cicd-scripts/setup-e2e-tests.sh | 275 ++ cicd-scripts/update-check-mco-csv.sh | 54 + collectors/metrics/Dockerfile | 57 + collectors/metrics/OWNERS | 11 + collectors/metrics/README.md | 59 + .../metrics/cmd/metrics-collector/main.go | 400 ++ .../cmd/metrics-collector/main_test.go | 50 + collectors/metrics/pkg/forwarder/forwarder.go | 479 ++ .../metrics/pkg/forwarder/forwarder_test.go | 260 ++ collectors/metrics/pkg/http/client.go | 101 + collectors/metrics/pkg/http/roundtripper.go | 81 + collectors/metrics/pkg/http/routes.go | 50 + collectors/metrics/pkg/logger/logger.go | 60 + .../metrics/pkg/metricfamily/anonymize.go | 82 + collectors/metrics/pkg/metricfamily/count.go | 16 + .../pkg/metricfamily/drop_timestamp.go | 19 + .../pkg/metricfamily/drop_timestamp_test.go | 113 + .../metrics/pkg/metricfamily/drop_unsorted.go | 26 + collectors/metrics/pkg/metricfamily/elide.go | 40 + .../metrics/pkg/metricfamily/elide_test.go | 216 + collectors/metrics/pkg/metricfamily/empty.go | 12 + .../metrics/pkg/metricfamily/expired.go | 30 + .../metrics/pkg/metricfamily/invalid.go | 192 + collectors/metrics/pkg/metricfamily/label.go | 76 + .../pkg/metricfamily/multi_transformer.go | 42 + collectors/metrics/pkg/metricfamily/none.go | 5 + .../metrics/pkg/metricfamily/overwrite.go | 51 + collectors/metrics/pkg/metricfamily/pack.go | 62 + collectors/metrics/pkg/metricfamily/rename.go | 17 + .../metrics/pkg/metricfamily/required.go | 43 + collectors/metrics/pkg/metricfamily/sort.go | 128 + .../metrics/pkg/metricfamily/transform.go | 41 + .../pkg/metricfamily/transform_test.go | 117 + .../metrics/pkg/metricfamily/unsorted.go | 44 + .../metrics/pkg/metricfamily/whitelist.go | 63 + .../pkg/metricfamily/whitelist_test.go | 206 + .../pkg/metricsclient/metricsclient.go | 577 +++ .../pkg/metricsclient/metricsclient_test.go | 211 + collectors/metrics/pkg/reader/reader.go | 50 + collectors/metrics/pkg/reader/reader_test.go | 46 + collectors/metrics/pkg/simulator/simulator.go | 111 + .../metrics/pkg/simulator/simulator_test.go | 14 + collectors/metrics/pkg/status/status.go | 131 + collectors/metrics/pkg/status/status_test.go | 65 + collectors/metrics/test/integration/clean.sh | 22 + .../integration/kind/kind-hub.config.yaml | 14 + .../client-serving-certs-ca-bundle.yaml | 9 + .../integration/manifests/deployment.yaml | 136 + .../manifests/metrics-collector-cert.yaml | 13 + 
.../observatorium-api-configmap.yaml | 30 + .../manifests/observatorium-api-secret.yaml | 15 + .../manifests/observatorium-api-service.yaml | 28 + .../manifests/observatorium-api.yaml | 92 + .../manifests/observatorium-ca-cert.yaml | 11 + .../integration/manifests/rolebinding.yaml | 12 + .../integration/manifests/thanos-api.yaml | 167 + .../manifests/thanos-configmap.yaml | 7 + .../integration/manifests/thanos-pvc.yaml | 18 + .../integration/manifests/thanos-service.yaml | 36 + collectors/metrics/test/integration/prereq.sh | 51 + collectors/metrics/test/integration/setup.sh | 182 + collectors/metrics/testdata/service-ca.crt | 20 + collectors/metrics/testdata/timeseries.txt | 10 + collectors/metrics/testdata/tls/ca.crt | 21 + collectors/metrics/testdata/tls/tls.crt | 21 + collectors/metrics/testdata/tls/tls.key | 27 + collectors/metrics/testdata/token | 1 + docs/MoreAboutPersistentStorage.md | 43 + docs/MultiClusterObservability-CRD.md | 680 +++ .../multicluster-observability-operator.png | Bin 0 -> 60266 bytes docs/images/observability_overview_in_ocm.png | Bin 0 -> 38201 bytes docs/scale-perf.md | 65 + docs/setup-ceph-for-object-storage.md | 202 + docs/setup-ocs-for-object-storage.md | 50 + .../custom_rules_invalid/kustomization.yaml | 4 + .../thanos-ruler-custom-rules-invalid.yaml | 18 + .../custom_rules_valid/kustomization.yaml | 4 + .../thanos-ruler-custom-rules-valid.yaml | 18 + examples/ceph/cluster.yaml | 264 ++ examples/ceph/object-user.yaml | 8 + examples/ceph/object.yaml | 58 + examples/ceph/operator.yaml | 485 +++ examples/ceph/scc.yaml | 44 + examples/ceph/toolbox.yaml | 45 + .../custom-kubernetes-pvc-dashboard.yaml | 506 +++ .../custom-metrics-allowlist.yaml | 9 + .../custom-number-of-clusters.yaml | 184 + .../dashboard_subscription.yaml | 25 + .../kustomization.yaml | 6 + .../custom-sample-dashboard.yaml | 18 + .../kustomization.yaml | 2 + .../kustomization.yaml | 2 + .../update-custom-sample-dashboard.yaml | 18 + examples/mco/e2e/v1beta1/kustomization.yaml | 2 + ...servability-v1beta1-to-v1beta2-golden.yaml | 28 + examples/mco/e2e/v1beta1/observability.yaml | 18 + examples/mco/e2e/v1beta2/kustomization.yaml | 2 + examples/mco/e2e/v1beta2/observability.yaml | 114 + .../allowlist/custom-metrics-allowlist.yaml | 13 + examples/metrics/allowlist/kustomization.yaml | 2 + examples/minio/kustomization.yaml | 5 + examples/minio/minio-deployment.yaml | 41 + examples/minio/minio-pvc.yaml | 14 + examples/minio/minio-secret.yaml | 8 + examples/minio/minio-service.yaml | 13 + examples/policy/kustomization.yaml | 3 + examples/policy/limitRange.yaml | 14 + examples/policy/resourceQuota.yaml | 9 + go.mod | 198 + go.sum | 3843 +++++++++++++++++ hack/boilerplate.go.txt | 15 + loaders/dashboards/Dockerfile | 52 + loaders/dashboards/OWNERS | 8 + loaders/dashboards/README.md | 14 + loaders/dashboards/cmd/main.go | 38 + .../dashboards/examples/k8s-dashboard.yaml | 1833 ++++++++ .../pkg/controller/dashboard_controller.go | 381 ++ .../controller/dashboard_controller_test.go | 391 ++ loaders/dashboards/pkg/util/grafana_util.go | 74 + .../dashboards/pkg/util/grafana_util_test.go | 46 + operators/endpointmetrics/Dockerfile | 58 + operators/endpointmetrics/OWNERS | 9 + operators/endpointmetrics/README.md | 140 + .../config/certmanager/certificate.yaml | 25 + .../config/certmanager/kustomization.yaml | 5 + .../config/certmanager/kustomizeconfig.yaml | 16 + ...ter-management.io_observabilityaddons.yaml | 94 + .../config/crd/kustomization.yaml | 2 + .../config/default/kustomization.yaml | 18 + 
.../config/manager/kustomization.yaml | 2 + .../config/manager/manager.yaml | 58 + .../endpointmetrics/config/rbac/emo_role.yaml | 119 + .../config/rbac/emo_rolebinding.yaml | 14 + .../config/rbac/emo_serviceaccount.yaml | 7 + .../config/rbac/kustomization.yaml | 4 + .../config/samples/kustomization.yaml | 4 + ...agement.io_v1beta1_observabilityaddon.yaml | 7 + .../metrics_collector.go | 304 ++ .../metrics_collector_test.go | 87 + .../observabilityaddon_controller.go | 354 ++ .../observabilityaddon_controller_test.go | 343 ++ .../ocp_monitoring_config.go | 502 +++ .../ocp_monitoring_config_test.go | 251 ++ .../observabilityendpoint/ocp_resource.go | 196 + .../ocp_resource_test.go | 107 + .../observabilityendpoint/predicate_func.go | 73 + .../predicate_func_test.go | 161 + .../controllers/status/status_controller.go | 101 + .../status/status_controller_test.go | 114 + operators/endpointmetrics/doc/API_design.md | 68 + .../endpointmetrics/doc/API_design_future.md | 96 + .../doc/ObservabilityAddon_Flow_Sprint1.png | Bin 0 -> 35809 bytes .../doc/ObservabilityAddon_Flow_Sprint2.png | Bin 0 -> 52899 bytes operators/endpointmetrics/main.go | 145 + .../prometheus/kube-prometheus-rules.yaml | 32 + .../kube-state-metrics-clusterRole.yaml | 113 + ...kube-state-metrics-clusterRoleBinding.yaml | 12 + .../kube-state-metrics-deployment.yaml | 84 + .../kube-state-metrics-service.yaml | 21 + .../kube-state-metrics-serviceAccount.yaml | 5 + .../kubernetes-monitoring-alertingrules.yaml | 962 +++++ .../kubernetes-monitoring-rules.yaml | 687 +++ .../manifests/prometheus/kustomization.yaml | 27 + .../prometheus/node-exporter-clusterRole.yaml | 17 + .../node-exporter-clusterRoleBinding.yaml | 12 + .../prometheus/node-exporter-daemonset.yaml | 92 + .../prometheus/node-exporter-rules.yaml | 65 + .../prometheus/node-exporter-service.yaml | 18 + .../node-exporter-serviceAccount.yaml | 5 + .../prometheus/prometheus-clusterRole.yaml | 36 + .../prometheus-clusterRoleBinding.yaml | 12 + .../prometheus/prometheus-config.yaml | 780 ++++ .../prometheus/prometheus-role-default.yaml | 34 + .../prometheus-role-kube-system.yaml | 34 + .../manifests/prometheus/prometheus-role.yaml | 32 + .../prometheus-roleBinding-default.yaml | 17 + .../prometheus-roleBinding-kube-system.yaml | 17 + .../prometheus/prometheus-roleBinding.yaml | 14 + .../prometheus/prometheus-service.yaml | 18 + .../prometheus/prometheus-serviceAccount.yaml | 5 + .../prometheus/prometheus-statefulset.yaml | 165 + .../endpointmetrics/pkg/rendering/renderer.go | 175 + .../pkg/rendering/renderer_test.go | 67 + .../pkg/rendering/templates/templates.go | 24 + .../pkg/rendering/templates/templates_test.go | 28 + operators/endpointmetrics/pkg/util/client.go | 76 + operators/endpointmetrics/pkg/util/lease.go | 51 + operators/endpointmetrics/pkg/util/status.go | 49 + .../endpointmetrics/pkg/util/status_test.go | 70 + operators/endpointmetrics/version/version.go | 7 + .../multiclusterobservability/Dockerfile | 62 + operators/multiclusterobservability/Makefile | 158 + operators/multiclusterobservability/OWNERS | 9 + operators/multiclusterobservability/PROJECT | 18 + .../multiclusterobservability_shared.go | 95 + .../api/shared/zz_generated.deepcopy.go | 77 + .../api/v1beta1/groupversion_info.go | 38 + .../multiclusterobservability_conversion.go | 124 + .../multiclusterobservability_types.go | 145 + .../api/v1beta1/observabilityaddon_types.go | 57 + .../api/v1beta1/zz_generated.deepcopy.go | 265 ++ .../api/v1beta2/groupversion_info.go | 38 + 
.../multiclusterobservability_conversion.go | 14 + .../multiclusterobservability_types.go | 223 + .../multiclusterobservability_webhook.go | 208 + .../api/v1beta2/zz_generated.deepcopy.go | 355 ++ .../bundle.Dockerfile | 15 + .../core.observatorium.io_observatoria.yaml | 1781 ++++++++ .../manager-config_v1_configmap.yaml | 17 + ...bility-operator.clusterserviceversion.yaml | 549 +++ ...ervability-webhook-service_v1_service.yaml | 18 + ...gement.io_multiclusterobservabilities.yaml | 824 ++++ ...ter-management.io_observabilityaddons.yaml | 108 + .../bundle/metadata/annotations.yaml | 11 + .../bundle/tests/scorecard/config.yaml | 49 + .../core.observatorium.io_observatoria.yaml | 1783 ++++++++ ...gement.io_multiclusterobservabilities.yaml | 1004 +++++ ...ter-management.io_observabilityaddons.yaml | 121 + .../config/crd/kustomization.yaml | 13 + ...sterobservabilities_cainjection_patch.yaml | 19 + .../config/default/kustomization.yaml | 9 + .../config/manager/kustomization.yaml | 11 + .../config/manager/manager.yaml | 83 + .../config/manager/manager_webhook_patch.yaml | 23 + ...bility-operator.clusterserviceversion.yaml | 58 + .../config/manifests/kustomization.yaml | 3 + .../config/rbac/kustomization.yaml | 7 + .../config/rbac/mco_role.yaml | 337 ++ .../config/rbac/mco_role_binding.yaml | 12 + .../config/rbac/mco_service_account.yaml | 5 + .../config/rbac/role.yaml | 60 + .../config/samples/kustomization.yaml | 6 + ...ity_v1beta1_multiclusterobservability.yaml | 10 + ...ervability_v1beta1_observabilityaddon.yaml | 7 + ...ity_v1beta2_multiclusterobservability.yaml | 10 + .../config/webhook/kustomization.yaml | 7 + .../config/webhook/manifests.yaml | 29 + .../config/webhook/service.yaml | 14 + .../validatingwebhookconfiguration.yaml | 29 + .../webhook/webhook_cainjection_patch.yaml | 7 + .../webhook/webhook_service_cert_patch.yaml | 8 + .../multiclusterobservability/grafana.go | 180 + .../multiclusterobservability/grafana_test.go | 24 + .../multiclusterobservability_controller.go | 799 ++++ ...lticlusterobservability_controller_test.go | 915 ++++ .../multiclusterobservability_status.go | 392 ++ .../multiclusterobservability_status_test.go | 266 ++ .../observatorium.go | 687 +++ .../observatorium_test.go | 172 + .../storageversionmigration.go | 96 + .../storageversionmigration_test.go | 90 + .../placementrule/customize_img.go | 30 + .../endpoint_metrics_operator.go | 179 + .../placementrule/hub_info_secret.go | 88 + .../placementrule/hub_info_secret_test.go | 147 + .../controllers/placementrule/manifestwork.go | 629 +++ .../placementrule/manifestwork_test.go | 425 ++ .../controllers/placementrule/namespace.go | 27 + .../placementrule/namespace_test.go | 20 + .../controllers/placementrule/obsaddon.go | 136 + .../placementrule/obsaddon_test.go | 105 + .../placementrule/placementrule_controller.go | 865 ++++ .../placementrule_controller_test.go | 332 ++ .../controllers/placementrule/role.go | 385 ++ .../controllers/placementrule/role_test.go | 224 + .../controllers/placementrule/status.go | 71 + .../controllers/placementrule/status_test.go | 68 + operators/multiclusterobservability/main.go | 316 ++ .../base/alertmanager/alert_rules.yaml | 75 + .../alertmanager-accessor-clusterrole.yaml | 13 + ...rtmanager-accessor-clusterrolebinding.yaml | 14 + .../alertmanager-accessor-serviceaccount.yaml | 7 + .../alertmanager/alertmanager-cabundle.yaml | 11 + .../alertmanager-clusterrole.yaml | 19 + .../alertmanager-clusterrolebinding.yaml | 14 + .../alertmanager/alertmanager-config.yaml | 25 + 
.../alertmanager/alertmanager-operated.yaml | 25 + .../base/alertmanager/alertmanager-proxy.yaml | 12 + .../alertmanager/alertmanager-service.yaml | 27 + .../alertmanager-serviceaccount.yaml | 9 + .../alertmanager-statefulset.yaml | 165 + .../base/alertmanager/kustomization.yaml | 14 + .../manifests/base/config/kustomization.yaml | 2 + .../base/config/metrics_allowlist.yaml | 169 + .../base/grafana/cluster-role-binding.yaml | 12 + .../manifests/base/grafana/cluster-role.yaml | 13 + .../manifests/base/grafana/config.yaml | 8 + .../dash-acm-clusters-overview-ocp311.yaml | 1986 +++++++++ .../grafana/dash-acm-clusters-overview.yaml | 1806 ++++++++ .../dash-acm-optimization-overview.yaml | 1625 +++++++ .../base/grafana/dash-cluster-rsrc-use.yaml | 1066 +++++ .../base/grafana/dash-k8s-apiserver.yaml | 1110 +++++ .../dash-k8s-compute-resources-cluster.yaml | 2124 +++++++++ ...-k8s-compute-resources-namespace-pods.yaml | 1478 +++++++ ...compute-resources-namespace-workloads.yaml | 1019 +++++ .../dash-k8s-compute-resources-node-pods.yaml | 1044 +++++ .../dash-k8s-compute-resources-pod.yaml | 1298 ++++++ .../dash-k8s-compute-resources-workload.yaml | 865 ++++ .../manifests/base/grafana/dash-k8s-etcd.yaml | 1367 ++++++ .../grafana/dash-k8s-networking-cluster.yaml | 1177 +++++ ...ice-level-overview-api-server-cluster.yaml | 1064 +++++ ...k8s-service-level-overview-api-server.yaml | 670 +++ .../base/grafana/dash-node-rsrc-use.yaml | 1141 +++++ .../manifests/base/grafana/deployment.yaml | 92 + .../manifests/base/grafana/ingress.yaml | 18 + .../manifests/base/grafana/kustomization.yaml | 24 + .../base/grafana/service-account.yaml | 7 + .../base/grafana/service-monitor.yaml | 17 + .../manifests/base/grafana/service.yaml | 16 + .../base/observatorium/cluster_role.yaml | 109 + .../observatorium/cluster_role_binding.yaml | 17 + .../base/observatorium/kustomization.yaml | 7 + .../base/observatorium/operator.yaml | 59 + .../base/observatorium/prometheus_role.yaml | 15 + .../prometheus_role_binding.yaml | 13 + .../base/observatorium/service-account.yaml | 5 + .../base/proxy/cluster-role-binding.yaml | 12 + .../manifests/base/proxy/cluster-role.yaml | 24 + .../manifests/base/proxy/cookie-secret.yaml | 9 + .../manifests/base/proxy/deployment.yaml | 136 + .../manifests/base/proxy/ingress.yaml | 20 + .../manifests/base/proxy/kustomization.yaml | 9 + .../base/proxy/prob-cmd-configmap.yaml | 14 + .../manifests/base/proxy/service-account.yaml | 5 + .../manifests/base/proxy/service.yaml | 19 + .../manifests/base/thanos/kustomization.yaml | 4 + .../base/thanos/thanos-ruler-clusterrole.yaml | 11 + .../thanos-ruler-clusterrolebinding.yaml | 12 + .../base/thanos/thanos-ruler-config.yaml | 18 + .../aggregate_role.yaml | 18 + .../endpoint-observability/images.yaml | 11 + .../endpoint-observability/kustomization.yaml | 9 + ...-management.io_observabilityaddon_crd.yaml | 120 + ...ent.io_observabilityaddon_v1beta1_crd.yaml | 120 + .../endpoint-observability/operator.yaml | 61 + .../endpoint-observability/role.yaml | 210 + .../endpoint-observability/role_binding.yaml | 12 + .../service_account.yaml | 6 + .../pkg/certificates/approver.go | 24 + .../pkg/certificates/approver_test.go | 41 + .../pkg/certificates/cert_agent.go | 50 + .../pkg/certificates/cert_agent_test.go | 27 + .../pkg/certificates/cert_controller.go | 237 + .../pkg/certificates/cert_controller_test.go | 124 + .../pkg/certificates/certificates.go | 442 ++ .../pkg/certificates/certificates_test.go | 119 + .../pkg/certificates/signer.go | 94 + 
.../pkg/certificates/signer_test.go | 49 + .../pkg/config/azure_conf.go | 52 + .../pkg/config/config.go | 1089 +++++ .../pkg/config/config_test.go | 1040 +++++ .../pkg/config/gcs_conf.go | 44 + .../pkg/config/obj_storage_conf.go | 55 + .../pkg/config/obj_storage_conf_test.go | 202 + .../pkg/config/s3_conf.go | 52 + .../pkg/rendering/renderer.go | 167 + .../pkg/rendering/renderer_alertmanager.go | 165 + .../pkg/rendering/renderer_grafana.go | 96 + .../pkg/rendering/renderer_proxy.go | 165 + .../pkg/rendering/renderer_test.go | 62 + .../pkg/rendering/renderer_thanos.go | 44 + .../pkg/rendering/templates/templates.go | 138 + .../pkg/rendering/templates/templates_test.go | 28 + .../pkg/servicemonitor/sm_controller.go | 151 + .../pkg/servicemonitor/sm_controller_test.go | 35 + .../pkg/util/client.go | 161 + .../pkg/util/clustermanagementaddon.go | 105 + .../pkg/util/clustermanagementaddon_test.go | 64 + .../pkg/util/managedclusteraddon.go | 132 + .../pkg/util/managedclusteraddon_test.go | 41 + .../pkg/webhook/webhook_controller.go | 130 + .../pkg/webhook/webhook_controller_test.go | 176 + .../multiclusterobservability/prestop.sh | 27 + operators/pkg/config/config.go | 60 + operators/pkg/deploying/deployer.go | 245 ++ operators/pkg/deploying/deployer_test.go | 453 ++ operators/pkg/rendering/patching/patcher.go | 312 ++ .../pkg/rendering/patching/patcher_test.go | 71 + operators/pkg/rendering/renderer.go | 140 + .../pkg/rendering/templates/templates.go | 101 + operators/pkg/util/obj_compare.go | 223 + operators/pkg/util/obj_compare_test.go | 330 ++ operators/pkg/util/util.go | 113 + proxy/Dockerfile | 49 + proxy/OWNERS | 10 + proxy/README.md | 15 + proxy/cmd/main.go | 70 + proxy/deploy/cluster-role-binding.yaml | 12 + proxy/deploy/cluster-role.yaml | 11 + proxy/deploy/deployment.yaml | 36 + proxy/deploy/kustomization.yaml | 6 + proxy/deploy/service-account.yaml | 5 + proxy/deploy/service.yaml | 13 + proxy/examples/managedcluster/cluster1.yaml | 9 + proxy/examples/managedcluster/cluster2.yaml | 9 + proxy/examples/rbac/README.md | 23 + proxy/examples/rbac/admin-rbac.yaml | 27 + proxy/examples/rbac/user1-rbac.yaml | 47 + proxy/examples/rbac/user2-rbac.yaml | 47 + proxy/pkg/proxy/proxy.go | 155 + proxy/pkg/proxy/proxy_test.go | 185 + proxy/pkg/proxy/tls.go | 61 + proxy/pkg/rewrite/rewrite.go | 52 + proxy/pkg/rewrite/rewrite_test.go | 84 + proxy/pkg/util/user_project.go | 77 + proxy/pkg/util/user_project_test.go | 110 + proxy/pkg/util/util.go | 287 ++ proxy/pkg/util/util_test.go | 281 ++ sonar-project.properties | 10 + tests/Dockerfile | 50 + tests/OWNERS | 8 + tests/README.md | 276 ++ tests/benchmark/README.md | 33 + tests/benchmark/clean-metrics-collector.sh | 29 + tests/benchmark/metrics-collector-view.yaml | 14 + tests/benchmark/setup-metrics-collector.sh | 70 + tests/format-results.sh | 15 + tests/grafana-dev-test.sh | 86 + tests/pkg/kustomize/render.go | 34 + tests/pkg/kustomize/render_test.go | 67 + tests/pkg/kustomize/tests/kustomization.yaml | 4 + .../thanos-ruler-custom-rules-valid.yaml | 17 + tests/pkg/testdata/ignored-metric-list | 79 + .../observability-e2e-test_suite_test.go | 213 + tests/pkg/tests/observability_addon_test.go | 191 + tests/pkg/tests/observability_alert_test.go | 342 ++ .../pkg/tests/observability_certrenew_test.go | 159 + tests/pkg/tests/observability_config_test.go | 236 + .../pkg/tests/observability_dashboard_test.go | 79 + .../observability_endpoint_preserve_test.go | 164 + .../tests/observability_grafana_dev_test.go | 42 + 
tests/pkg/tests/observability_grafana_test.go | 57 + tests/pkg/tests/observability_install_test.go | 187 + .../tests/observability_manifestwork_test.go | 111 + tests/pkg/tests/observability_metrics_test.go | 147 + ...servability_observatorium_preserve_test.go | 88 + .../pkg/tests/observability_reconcile_test.go | 208 + .../pkg/tests/observability_retention_test.go | 169 + tests/pkg/tests/observability_route_test.go | 201 + .../pkg/tests/observability_uninstall_test.go | 86 + tests/pkg/utils/client.go | 55 + tests/pkg/utils/cluster_deploy.go | 43 + tests/pkg/utils/install_config.go | 55 + tests/pkg/utils/mco_cert_secret.go | 48 + tests/pkg/utils/mco_clusterrolebinding.go | 57 + tests/pkg/utils/mco_configmaps.go | 53 + tests/pkg/utils/mco_dashboard.go | 70 + tests/pkg/utils/mco_deploy.go | 907 ++++ tests/pkg/utils/mco_deployments.go | 86 + tests/pkg/utils/mco_grafana.go | 14 + tests/pkg/utils/mco_managedcluster.go | 150 + tests/pkg/utils/mco_metric.go | 162 + tests/pkg/utils/mco_namespace.go | 27 + tests/pkg/utils/mco_oba.go | 90 + tests/pkg/utils/mco_pods.go | 64 + tests/pkg/utils/mco_router_ca.go | 31 + tests/pkg/utils/mco_sa.go | 49 + tests/pkg/utils/mco_statefulset.go | 35 + tests/pkg/utils/options.go | 81 + tests/pkg/utils/utils.go | 707 +++ tests/resources/.gitignore | 2 + tests/resources/env.list.template | 4 + tests/resources/options.yaml.template | 4 + tests/run-in-kind/env.sh | 1 + .../grafana/grafana-config-test.yaml | 31 + .../grafana/grafana-datasources-test.yaml | 23 + tests/run-in-kind/grafana/grafana-svc.yaml | 18 + tests/run-in-kind/kind/kind-hub.config.yaml | 17 + .../req_crds/clusteroperators-crd.yaml | 161 + tests/run-in-kind/req_crds/ingresses-crd.yaml | 58 + .../run-in-kind/req_crds/servicecas-crd.yaml | 162 + tests/run-in-kind/router/route_crd.yaml | 40 + tests/run-in-kind/router/router.yaml | 54 + tests/run-in-kind/router/router_rbac.yaml | 68 + tests/run-in-kind/run-e2e-in-kind.sh | 79 + tests/run-in-kind/service-ca/00_roles.yaml | 12 + .../run-in-kind/service-ca/01_namespace.yaml | 9 + tests/run-in-kind/service-ca/02_service.yaml | 19 + tests/run-in-kind/service-ca/03_cm.yaml | 9 + .../service-ca/03_operator.cr.yaml | 8 + tests/run-in-kind/service-ca/04_sa.yaml | 7 + tests/run-in-kind/service-ca/05_deploy.yaml | 66 + .../service-ca/07_clusteroperator.yaml | 9 + .../templates/cluster-monitoring-view.yaml | 11 + tools/README.md | 88 + tools/example/custom-dashboard.yaml | 12 + ...bservability-metrics-custom-allowlist.yaml | 9 + tools/generate-dashboard-configmap-yaml.sh | 139 + tools/setup-grafana-dev.sh | 180 + tools/simulator/alert-forward/README.md | 61 + tools/simulator/alert-forward/main.go | 168 + tools/simulator/managed-cluster/README.md | 55 + .../managed-cluster/setup-managedcluster.sh | 38 + tools/simulator/metrics-collector/Dockerfile | 3 + tools/simulator/metrics-collector/Makefile | 43 + tools/simulator/metrics-collector/README.md | 53 + .../clean-metrics-collector.sh | 29 + .../metrics-collector-view.yaml | 14 + .../setup-metrics-collector.sh | 97 + tools/switch-to-grafana-admin.sh | 111 + 518 files changed, 81048 insertions(+) create mode 100644 CONTRIBUTING.md create mode 100644 DCO create mode 100644 Jenkinsfile create mode 100644 Jenkinsfile_upgrade create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 OWNERS create mode 100644 REMEDIATE.md create mode 100644 SECURITY.md create mode 100644 cicd-scripts/Configfile create mode 100755 cicd-scripts/build.sh create mode 100755 cicd-scripts/copyright-check.sh create mode 100755 
cicd-scripts/customize-mco.sh create mode 100755 cicd-scripts/deploy-to-cluster.sh create mode 100755 cicd-scripts/install-dependencies.sh create mode 100755 cicd-scripts/run-e2e-in-kind-via-prow.sh create mode 100755 cicd-scripts/run-e2e-tests.sh create mode 100755 cicd-scripts/run-unit-tests.sh create mode 100755 cicd-scripts/setup-e2e-tests.sh create mode 100755 cicd-scripts/update-check-mco-csv.sh create mode 100644 collectors/metrics/Dockerfile create mode 100644 collectors/metrics/OWNERS create mode 100644 collectors/metrics/README.md create mode 100644 collectors/metrics/cmd/metrics-collector/main.go create mode 100644 collectors/metrics/cmd/metrics-collector/main_test.go create mode 100644 collectors/metrics/pkg/forwarder/forwarder.go create mode 100644 collectors/metrics/pkg/forwarder/forwarder_test.go create mode 100644 collectors/metrics/pkg/http/client.go create mode 100644 collectors/metrics/pkg/http/roundtripper.go create mode 100644 collectors/metrics/pkg/http/routes.go create mode 100644 collectors/metrics/pkg/logger/logger.go create mode 100644 collectors/metrics/pkg/metricfamily/anonymize.go create mode 100644 collectors/metrics/pkg/metricfamily/count.go create mode 100644 collectors/metrics/pkg/metricfamily/drop_timestamp.go create mode 100644 collectors/metrics/pkg/metricfamily/drop_timestamp_test.go create mode 100644 collectors/metrics/pkg/metricfamily/drop_unsorted.go create mode 100644 collectors/metrics/pkg/metricfamily/elide.go create mode 100644 collectors/metrics/pkg/metricfamily/elide_test.go create mode 100644 collectors/metrics/pkg/metricfamily/empty.go create mode 100644 collectors/metrics/pkg/metricfamily/expired.go create mode 100644 collectors/metrics/pkg/metricfamily/invalid.go create mode 100644 collectors/metrics/pkg/metricfamily/label.go create mode 100644 collectors/metrics/pkg/metricfamily/multi_transformer.go create mode 100644 collectors/metrics/pkg/metricfamily/none.go create mode 100644 collectors/metrics/pkg/metricfamily/overwrite.go create mode 100644 collectors/metrics/pkg/metricfamily/pack.go create mode 100644 collectors/metrics/pkg/metricfamily/rename.go create mode 100644 collectors/metrics/pkg/metricfamily/required.go create mode 100644 collectors/metrics/pkg/metricfamily/sort.go create mode 100644 collectors/metrics/pkg/metricfamily/transform.go create mode 100644 collectors/metrics/pkg/metricfamily/transform_test.go create mode 100644 collectors/metrics/pkg/metricfamily/unsorted.go create mode 100644 collectors/metrics/pkg/metricfamily/whitelist.go create mode 100644 collectors/metrics/pkg/metricfamily/whitelist_test.go create mode 100644 collectors/metrics/pkg/metricsclient/metricsclient.go create mode 100644 collectors/metrics/pkg/metricsclient/metricsclient_test.go create mode 100644 collectors/metrics/pkg/reader/reader.go create mode 100644 collectors/metrics/pkg/reader/reader_test.go create mode 100644 collectors/metrics/pkg/simulator/simulator.go create mode 100644 collectors/metrics/pkg/simulator/simulator_test.go create mode 100644 collectors/metrics/pkg/status/status.go create mode 100644 collectors/metrics/pkg/status/status_test.go create mode 100755 collectors/metrics/test/integration/clean.sh create mode 100644 collectors/metrics/test/integration/kind/kind-hub.config.yaml create mode 100644 collectors/metrics/test/integration/manifests/client-serving-certs-ca-bundle.yaml create mode 100644 collectors/metrics/test/integration/manifests/deployment.yaml create mode 100644 
collectors/metrics/test/integration/manifests/metrics-collector-cert.yaml create mode 100644 collectors/metrics/test/integration/manifests/observatorium-api-configmap.yaml create mode 100644 collectors/metrics/test/integration/manifests/observatorium-api-secret.yaml create mode 100644 collectors/metrics/test/integration/manifests/observatorium-api-service.yaml create mode 100644 collectors/metrics/test/integration/manifests/observatorium-api.yaml create mode 100644 collectors/metrics/test/integration/manifests/observatorium-ca-cert.yaml create mode 100644 collectors/metrics/test/integration/manifests/rolebinding.yaml create mode 100644 collectors/metrics/test/integration/manifests/thanos-api.yaml create mode 100644 collectors/metrics/test/integration/manifests/thanos-configmap.yaml create mode 100644 collectors/metrics/test/integration/manifests/thanos-pvc.yaml create mode 100644 collectors/metrics/test/integration/manifests/thanos-service.yaml create mode 100755 collectors/metrics/test/integration/prereq.sh create mode 100755 collectors/metrics/test/integration/setup.sh create mode 100644 collectors/metrics/testdata/service-ca.crt create mode 100644 collectors/metrics/testdata/timeseries.txt create mode 100644 collectors/metrics/testdata/tls/ca.crt create mode 100644 collectors/metrics/testdata/tls/tls.crt create mode 100644 collectors/metrics/testdata/tls/tls.key create mode 100644 collectors/metrics/testdata/token create mode 100644 docs/MoreAboutPersistentStorage.md create mode 100644 docs/MultiClusterObservability-CRD.md create mode 100644 docs/images/multicluster-observability-operator.png create mode 100644 docs/images/observability_overview_in_ocm.png create mode 100644 docs/scale-perf.md create mode 100644 docs/setup-ceph-for-object-storage.md create mode 100644 docs/setup-ocs-for-object-storage.md create mode 100644 examples/alerts/custom_rules_invalid/kustomization.yaml create mode 100644 examples/alerts/custom_rules_invalid/thanos-ruler-custom-rules-invalid.yaml create mode 100644 examples/alerts/custom_rules_valid/kustomization.yaml create mode 100644 examples/alerts/custom_rules_valid/thanos-ruler-custom-rules-valid.yaml create mode 100644 examples/ceph/cluster.yaml create mode 100644 examples/ceph/object-user.yaml create mode 100644 examples/ceph/object.yaml create mode 100644 examples/ceph/operator.yaml create mode 100644 examples/ceph/scc.yaml create mode 100644 examples/ceph/toolbox.yaml create mode 100644 examples/dashboards/kubernetes_pvc_dashboard/custom-kubernetes-pvc-dashboard.yaml create mode 100644 examples/dashboards/kubernetes_pvc_dashboard/custom-metrics-allowlist.yaml create mode 100644 examples/dashboards/kubernetes_pvc_dashboard/custom-number-of-clusters.yaml create mode 100644 examples/dashboards/kubernetes_pvc_dashboard/dashboard_subscription.yaml create mode 100644 examples/dashboards/kubernetes_pvc_dashboard/kustomization.yaml create mode 100644 examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml create mode 100644 examples/dashboards/sample_custom_dashboard/kustomization.yaml create mode 100644 examples/dashboards/update_sample_custom_dashboard/kustomization.yaml create mode 100644 examples/dashboards/update_sample_custom_dashboard/update-custom-sample-dashboard.yaml create mode 100644 examples/mco/e2e/v1beta1/kustomization.yaml create mode 100644 examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml create mode 100644 examples/mco/e2e/v1beta1/observability.yaml create mode 100644 
examples/mco/e2e/v1beta2/kustomization.yaml create mode 100644 examples/mco/e2e/v1beta2/observability.yaml create mode 100644 examples/metrics/allowlist/custom-metrics-allowlist.yaml create mode 100644 examples/metrics/allowlist/kustomization.yaml create mode 100644 examples/minio/kustomization.yaml create mode 100644 examples/minio/minio-deployment.yaml create mode 100644 examples/minio/minio-pvc.yaml create mode 100644 examples/minio/minio-secret.yaml create mode 100644 examples/minio/minio-service.yaml create mode 100644 examples/policy/kustomization.yaml create mode 100644 examples/policy/limitRange.yaml create mode 100644 examples/policy/resourceQuota.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 hack/boilerplate.go.txt create mode 100644 loaders/dashboards/Dockerfile create mode 100644 loaders/dashboards/OWNERS create mode 100644 loaders/dashboards/README.md create mode 100644 loaders/dashboards/cmd/main.go create mode 100644 loaders/dashboards/examples/k8s-dashboard.yaml create mode 100644 loaders/dashboards/pkg/controller/dashboard_controller.go create mode 100644 loaders/dashboards/pkg/controller/dashboard_controller_test.go create mode 100644 loaders/dashboards/pkg/util/grafana_util.go create mode 100644 loaders/dashboards/pkg/util/grafana_util_test.go create mode 100644 operators/endpointmetrics/Dockerfile create mode 100644 operators/endpointmetrics/OWNERS create mode 100644 operators/endpointmetrics/README.md create mode 100644 operators/endpointmetrics/config/certmanager/certificate.yaml create mode 100644 operators/endpointmetrics/config/certmanager/kustomization.yaml create mode 100644 operators/endpointmetrics/config/certmanager/kustomizeconfig.yaml create mode 100644 operators/endpointmetrics/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml create mode 100644 operators/endpointmetrics/config/crd/kustomization.yaml create mode 100644 operators/endpointmetrics/config/default/kustomization.yaml create mode 100644 operators/endpointmetrics/config/manager/kustomization.yaml create mode 100644 operators/endpointmetrics/config/manager/manager.yaml create mode 100644 operators/endpointmetrics/config/rbac/emo_role.yaml create mode 100644 operators/endpointmetrics/config/rbac/emo_rolebinding.yaml create mode 100644 operators/endpointmetrics/config/rbac/emo_serviceaccount.yaml create mode 100644 operators/endpointmetrics/config/rbac/kustomization.yaml create mode 100644 operators/endpointmetrics/config/samples/kustomization.yaml create mode 100644 operators/endpointmetrics/config/samples/observability.open-cluster-management.io_v1beta1_observabilityaddon.yaml create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config_test.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource_test.go create mode 100644 
operators/endpointmetrics/controllers/observabilityendpoint/predicate_func.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/predicate_func_test.go create mode 100644 operators/endpointmetrics/controllers/status/status_controller.go create mode 100644 operators/endpointmetrics/controllers/status/status_controller_test.go create mode 100644 operators/endpointmetrics/doc/API_design.md create mode 100644 operators/endpointmetrics/doc/API_design_future.md create mode 100644 operators/endpointmetrics/doc/ObservabilityAddon_Flow_Sprint1.png create mode 100644 operators/endpointmetrics/doc/ObservabilityAddon_Flow_Sprint2.png create mode 100644 operators/endpointmetrics/main.go create mode 100644 operators/endpointmetrics/manifests/prometheus/kube-prometheus-rules.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kube-state-metrics-clusterRole.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kube-state-metrics-clusterRoleBinding.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kube-state-metrics-deployment.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kube-state-metrics-service.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kube-state-metrics-serviceAccount.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-alertingrules.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/kustomization.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRoleBinding.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-service.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-serviceAccount.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-clusterRole.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-clusterRoleBinding.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-role-default.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-role-kube-system.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-role.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-default.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-kube-system.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-service.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-serviceAccount.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml create mode 100644 operators/endpointmetrics/pkg/rendering/renderer.go create mode 100644 operators/endpointmetrics/pkg/rendering/renderer_test.go create mode 100644 
operators/endpointmetrics/pkg/rendering/templates/templates.go create mode 100644 operators/endpointmetrics/pkg/rendering/templates/templates_test.go create mode 100644 operators/endpointmetrics/pkg/util/client.go create mode 100644 operators/endpointmetrics/pkg/util/lease.go create mode 100644 operators/endpointmetrics/pkg/util/status.go create mode 100644 operators/endpointmetrics/pkg/util/status_test.go create mode 100644 operators/endpointmetrics/version/version.go create mode 100644 operators/multiclusterobservability/Dockerfile create mode 100644 operators/multiclusterobservability/Makefile create mode 100644 operators/multiclusterobservability/OWNERS create mode 100644 operators/multiclusterobservability/PROJECT create mode 100644 operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go create mode 100644 operators/multiclusterobservability/api/shared/zz_generated.deepcopy.go create mode 100644 operators/multiclusterobservability/api/v1beta1/groupversion_info.go create mode 100644 operators/multiclusterobservability/api/v1beta1/multiclusterobservability_conversion.go create mode 100644 operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go create mode 100644 operators/multiclusterobservability/api/v1beta1/observabilityaddon_types.go create mode 100644 operators/multiclusterobservability/api/v1beta1/zz_generated.deepcopy.go create mode 100644 operators/multiclusterobservability/api/v1beta2/groupversion_info.go create mode 100644 operators/multiclusterobservability/api/v1beta2/multiclusterobservability_conversion.go create mode 100644 operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go create mode 100644 operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go create mode 100644 operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go create mode 100644 operators/multiclusterobservability/bundle.Dockerfile create mode 100644 operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml create mode 100644 operators/multiclusterobservability/bundle/manifests/manager-config_v1_configmap.yaml create mode 100644 operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml create mode 100644 operators/multiclusterobservability/bundle/manifests/multicluster-observability-webhook-service_v1_service.yaml create mode 100644 operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml create mode 100644 operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_observabilityaddons.yaml create mode 100644 operators/multiclusterobservability/bundle/metadata/annotations.yaml create mode 100644 operators/multiclusterobservability/bundle/tests/scorecard/config.yaml create mode 100644 operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml create mode 100644 operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml create mode 100644 operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml create mode 100644 operators/multiclusterobservability/config/crd/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/crd/patches/webhook_multiclusterobservabilities_cainjection_patch.yaml create mode 100644 
operators/multiclusterobservability/config/default/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/manager/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/manager/manager.yaml create mode 100644 operators/multiclusterobservability/config/manager/manager_webhook_patch.yaml create mode 100644 operators/multiclusterobservability/config/manifests/bases/multicluster-observability-operator.clusterserviceversion.yaml create mode 100644 operators/multiclusterobservability/config/manifests/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/rbac/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/rbac/mco_role.yaml create mode 100644 operators/multiclusterobservability/config/rbac/mco_role_binding.yaml create mode 100644 operators/multiclusterobservability/config/rbac/mco_service_account.yaml create mode 100644 operators/multiclusterobservability/config/rbac/role.yaml create mode 100644 operators/multiclusterobservability/config/samples/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/samples/observability_v1beta1_multiclusterobservability.yaml create mode 100644 operators/multiclusterobservability/config/samples/observability_v1beta1_observabilityaddon.yaml create mode 100644 operators/multiclusterobservability/config/samples/observability_v1beta2_multiclusterobservability.yaml create mode 100644 operators/multiclusterobservability/config/webhook/kustomization.yaml create mode 100644 operators/multiclusterobservability/config/webhook/manifests.yaml create mode 100644 operators/multiclusterobservability/config/webhook/service.yaml create mode 100644 operators/multiclusterobservability/config/webhook/validatingwebhookconfiguration.yaml create mode 100644 operators/multiclusterobservability/config/webhook/webhook_cainjection_patch.yaml create mode 100644 operators/multiclusterobservability/config/webhook/webhook_service_cert_patch.yaml create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/grafana_test.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status_test.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go create mode 100644 operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/customize_img.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go create mode 
100644 operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/manifestwork.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/namespace.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/namespace_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/obsaddon.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/role.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/role_test.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/status.go create mode 100644 operators/multiclusterobservability/controllers/placementrule/status_test.go create mode 100644 operators/multiclusterobservability/main.go create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alert_rules.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrole.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrolebinding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-serviceaccount.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-cabundle.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrole.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrolebinding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-config.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-operated.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-proxy.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-service.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-serviceaccount.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/base/config/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/cluster-role-binding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/cluster-role.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/config.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml create mode 100644 
operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-apiserver.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-etcd.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server-cluster.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/deployment.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/ingress.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/service-account.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/service-monitor.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/service.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/cluster_role.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/cluster_role_binding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/operator.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/prometheus_role.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/prometheus_role_binding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/observatorium/service-account.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/cluster-role-binding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/cluster-role.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/cookie-secret.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/deployment.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/ingress.yaml create mode 100644 
operators/multiclusterobservability/manifests/base/proxy/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/prob-cmd-configmap.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/service-account.yaml create mode 100644 operators/multiclusterobservability/manifests/base/proxy/service.yaml create mode 100644 operators/multiclusterobservability/manifests/base/thanos/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrole.yaml create mode 100644 operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrolebinding.yaml create mode 100644 operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-config.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/aggregate_role.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/images.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/kustomization.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_crd.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_v1beta1_crd.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/role.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/role_binding.yaml create mode 100644 operators/multiclusterobservability/manifests/endpoint-observability/service_account.yaml create mode 100644 operators/multiclusterobservability/pkg/certificates/approver.go create mode 100644 operators/multiclusterobservability/pkg/certificates/approver_test.go create mode 100644 operators/multiclusterobservability/pkg/certificates/cert_agent.go create mode 100644 operators/multiclusterobservability/pkg/certificates/cert_agent_test.go create mode 100644 operators/multiclusterobservability/pkg/certificates/cert_controller.go create mode 100644 operators/multiclusterobservability/pkg/certificates/cert_controller_test.go create mode 100644 operators/multiclusterobservability/pkg/certificates/certificates.go create mode 100644 operators/multiclusterobservability/pkg/certificates/certificates_test.go create mode 100644 operators/multiclusterobservability/pkg/certificates/signer.go create mode 100644 operators/multiclusterobservability/pkg/certificates/signer_test.go create mode 100644 operators/multiclusterobservability/pkg/config/azure_conf.go create mode 100644 operators/multiclusterobservability/pkg/config/config.go create mode 100644 operators/multiclusterobservability/pkg/config/config_test.go create mode 100644 operators/multiclusterobservability/pkg/config/gcs_conf.go create mode 100644 operators/multiclusterobservability/pkg/config/obj_storage_conf.go create mode 100644 operators/multiclusterobservability/pkg/config/obj_storage_conf_test.go create mode 100644 operators/multiclusterobservability/pkg/config/s3_conf.go create mode 100644 operators/multiclusterobservability/pkg/rendering/renderer.go create mode 100644 operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go create mode 100644 operators/multiclusterobservability/pkg/rendering/renderer_grafana.go 
create mode 100644 operators/multiclusterobservability/pkg/rendering/renderer_proxy.go create mode 100644 operators/multiclusterobservability/pkg/rendering/renderer_test.go create mode 100644 operators/multiclusterobservability/pkg/rendering/renderer_thanos.go create mode 100644 operators/multiclusterobservability/pkg/rendering/templates/templates.go create mode 100644 operators/multiclusterobservability/pkg/rendering/templates/templates_test.go create mode 100644 operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go create mode 100644 operators/multiclusterobservability/pkg/servicemonitor/sm_controller_test.go create mode 100644 operators/multiclusterobservability/pkg/util/client.go create mode 100644 operators/multiclusterobservability/pkg/util/clustermanagementaddon.go create mode 100644 operators/multiclusterobservability/pkg/util/clustermanagementaddon_test.go create mode 100644 operators/multiclusterobservability/pkg/util/managedclusteraddon.go create mode 100644 operators/multiclusterobservability/pkg/util/managedclusteraddon_test.go create mode 100644 operators/multiclusterobservability/pkg/webhook/webhook_controller.go create mode 100644 operators/multiclusterobservability/pkg/webhook/webhook_controller_test.go create mode 100755 operators/multiclusterobservability/prestop.sh create mode 100644 operators/pkg/config/config.go create mode 100644 operators/pkg/deploying/deployer.go create mode 100644 operators/pkg/deploying/deployer_test.go create mode 100644 operators/pkg/rendering/patching/patcher.go create mode 100644 operators/pkg/rendering/patching/patcher_test.go create mode 100644 operators/pkg/rendering/renderer.go create mode 100644 operators/pkg/rendering/templates/templates.go create mode 100644 operators/pkg/util/obj_compare.go create mode 100644 operators/pkg/util/obj_compare_test.go create mode 100644 operators/pkg/util/util.go create mode 100644 proxy/Dockerfile create mode 100644 proxy/OWNERS create mode 100644 proxy/README.md create mode 100644 proxy/cmd/main.go create mode 100644 proxy/deploy/cluster-role-binding.yaml create mode 100644 proxy/deploy/cluster-role.yaml create mode 100644 proxy/deploy/deployment.yaml create mode 100644 proxy/deploy/kustomization.yaml create mode 100644 proxy/deploy/service-account.yaml create mode 100644 proxy/deploy/service.yaml create mode 100644 proxy/examples/managedcluster/cluster1.yaml create mode 100644 proxy/examples/managedcluster/cluster2.yaml create mode 100644 proxy/examples/rbac/README.md create mode 100644 proxy/examples/rbac/admin-rbac.yaml create mode 100644 proxy/examples/rbac/user1-rbac.yaml create mode 100644 proxy/examples/rbac/user2-rbac.yaml create mode 100644 proxy/pkg/proxy/proxy.go create mode 100644 proxy/pkg/proxy/proxy_test.go create mode 100644 proxy/pkg/proxy/tls.go create mode 100644 proxy/pkg/rewrite/rewrite.go create mode 100644 proxy/pkg/rewrite/rewrite_test.go create mode 100644 proxy/pkg/util/user_project.go create mode 100644 proxy/pkg/util/user_project_test.go create mode 100644 proxy/pkg/util/util.go create mode 100644 proxy/pkg/util/util_test.go create mode 100644 sonar-project.properties create mode 100644 tests/Dockerfile create mode 100644 tests/OWNERS create mode 100644 tests/README.md create mode 100644 tests/benchmark/README.md create mode 100755 tests/benchmark/clean-metrics-collector.sh create mode 100644 tests/benchmark/metrics-collector-view.yaml create mode 100755 tests/benchmark/setup-metrics-collector.sh create mode 100755 tests/format-results.sh create mode 100755 
tests/grafana-dev-test.sh create mode 100644 tests/pkg/kustomize/render.go create mode 100644 tests/pkg/kustomize/render_test.go create mode 100644 tests/pkg/kustomize/tests/kustomization.yaml create mode 100644 tests/pkg/kustomize/tests/thanos-ruler-custom-rules-valid.yaml create mode 100644 tests/pkg/testdata/ignored-metric-list create mode 100644 tests/pkg/tests/observability-e2e-test_suite_test.go create mode 100644 tests/pkg/tests/observability_addon_test.go create mode 100644 tests/pkg/tests/observability_alert_test.go create mode 100644 tests/pkg/tests/observability_certrenew_test.go create mode 100644 tests/pkg/tests/observability_config_test.go create mode 100644 tests/pkg/tests/observability_dashboard_test.go create mode 100644 tests/pkg/tests/observability_endpoint_preserve_test.go create mode 100644 tests/pkg/tests/observability_grafana_dev_test.go create mode 100644 tests/pkg/tests/observability_grafana_test.go create mode 100644 tests/pkg/tests/observability_install_test.go create mode 100644 tests/pkg/tests/observability_manifestwork_test.go create mode 100644 tests/pkg/tests/observability_metrics_test.go create mode 100644 tests/pkg/tests/observability_observatorium_preserve_test.go create mode 100644 tests/pkg/tests/observability_reconcile_test.go create mode 100644 tests/pkg/tests/observability_retention_test.go create mode 100644 tests/pkg/tests/observability_route_test.go create mode 100644 tests/pkg/tests/observability_uninstall_test.go create mode 100644 tests/pkg/utils/client.go create mode 100644 tests/pkg/utils/cluster_deploy.go create mode 100644 tests/pkg/utils/install_config.go create mode 100644 tests/pkg/utils/mco_cert_secret.go create mode 100644 tests/pkg/utils/mco_clusterrolebinding.go create mode 100644 tests/pkg/utils/mco_configmaps.go create mode 100644 tests/pkg/utils/mco_dashboard.go create mode 100644 tests/pkg/utils/mco_deploy.go create mode 100644 tests/pkg/utils/mco_deployments.go create mode 100644 tests/pkg/utils/mco_grafana.go create mode 100644 tests/pkg/utils/mco_managedcluster.go create mode 100644 tests/pkg/utils/mco_metric.go create mode 100644 tests/pkg/utils/mco_namespace.go create mode 100644 tests/pkg/utils/mco_oba.go create mode 100644 tests/pkg/utils/mco_pods.go create mode 100644 tests/pkg/utils/mco_router_ca.go create mode 100644 tests/pkg/utils/mco_sa.go create mode 100644 tests/pkg/utils/mco_statefulset.go create mode 100644 tests/pkg/utils/options.go create mode 100644 tests/pkg/utils/utils.go create mode 100644 tests/resources/.gitignore create mode 100644 tests/resources/env.list.template create mode 100644 tests/resources/options.yaml.template create mode 100755 tests/run-in-kind/env.sh create mode 100644 tests/run-in-kind/grafana/grafana-config-test.yaml create mode 100644 tests/run-in-kind/grafana/grafana-datasources-test.yaml create mode 100644 tests/run-in-kind/grafana/grafana-svc.yaml create mode 100644 tests/run-in-kind/kind/kind-hub.config.yaml create mode 100644 tests/run-in-kind/req_crds/clusteroperators-crd.yaml create mode 100644 tests/run-in-kind/req_crds/ingresses-crd.yaml create mode 100644 tests/run-in-kind/req_crds/servicecas-crd.yaml create mode 100644 tests/run-in-kind/router/route_crd.yaml create mode 100644 tests/run-in-kind/router/router.yaml create mode 100644 tests/run-in-kind/router/router_rbac.yaml create mode 100755 tests/run-in-kind/run-e2e-in-kind.sh create mode 100644 tests/run-in-kind/service-ca/00_roles.yaml create mode 100644 tests/run-in-kind/service-ca/01_namespace.yaml create mode 100644 
tests/run-in-kind/service-ca/02_service.yaml create mode 100644 tests/run-in-kind/service-ca/03_cm.yaml create mode 100644 tests/run-in-kind/service-ca/03_operator.cr.yaml create mode 100644 tests/run-in-kind/service-ca/04_sa.yaml create mode 100644 tests/run-in-kind/service-ca/05_deploy.yaml create mode 100644 tests/run-in-kind/service-ca/07_clusteroperator.yaml create mode 100644 tests/run-in-kind/templates/cluster-monitoring-view.yaml create mode 100644 tools/README.md create mode 100644 tools/example/custom-dashboard.yaml create mode 100644 tools/example/observability-metrics-custom-allowlist.yaml create mode 100755 tools/generate-dashboard-configmap-yaml.sh create mode 100755 tools/setup-grafana-dev.sh create mode 100644 tools/simulator/alert-forward/README.md create mode 100644 tools/simulator/alert-forward/main.go create mode 100644 tools/simulator/managed-cluster/README.md create mode 100755 tools/simulator/managed-cluster/setup-managedcluster.sh create mode 100644 tools/simulator/metrics-collector/Dockerfile create mode 100644 tools/simulator/metrics-collector/Makefile create mode 100644 tools/simulator/metrics-collector/README.md create mode 100755 tools/simulator/metrics-collector/clean-metrics-collector.sh create mode 100644 tools/simulator/metrics-collector/metrics-collector-view.yaml create mode 100755 tools/simulator/metrics-collector/setup-metrics-collector.sh create mode 100755 tools/switch-to-grafana-admin.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..0d73f533d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing guidelines + +## Contributions + +All contributions to the repository must be submitted under the terms of the [Apache Public License 2.0](https://www.apache.org/licenses/LICENSE-2.0). + +## Certificate of Origin + +By contributing to this project you agree to the Developer Certificate of +Origin (DCO). This document was created by the Linux Kernel community and is a +simple statement that you, as a contributor, have the legal right to make the +contribution. See the [DCO](DCO) file for details. + +## DCO Sign Off + +You must sign your commit to state that you certify the [DCO](DCO). To sign your commit, add a line like the following at the end of your commit message: + +``` +Signed-off-by: John Smith +``` + +This can be done with the `--signoff` option to `git commit`. See the [Git documentation](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s) for details. You can also mass sign-off a whole pull request with `git rebase --signoff main`, replacing `main` with the branch you are creating a pull request from. + +## Contributing A Patch + +1. Submit an issue describing your proposed change to the repo in question. +1. The [repo owners](OWNERS) will respond to your issue promptly. +1. Fork the desired repo, develop and test your code changes. +1. Submit a pull request. + +## Issue and Pull Request Management + +Anyone may comment on issues and submit reviews for pull requests. However, in +order to be assigned an issue or pull request, you must be a member of the +[stolostron](https://github.com/stolostron) GitHub organization. + +Repo maintainers can assign you an issue or pull request by leaving a +`/assign ` comment on the issue or pull request. + +## Pre-check before submitting a PR + +After your PR is ready to commit, please run following commands to check your code. 
+ +```shell +make -f Makefile.prow test-unit +make -f Makefile.prow manager +``` + +## Build images + +Make sure your code build passed. + +```shell +make docker-build -f Dockerfile.prow +``` \ No newline at end of file diff --git a/DCO b/DCO new file mode 100644 index 000000000..8201f9921 --- /dev/null +++ b/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 000000000..63ef4ad86 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,95 @@ +pipeline { + agent { + docker { + image 'quay.io/rhn_support_abutt/ginkgo_1_14_2-linux-go' + args '--network host -u 0:0' + } + } + parameters { + string(name:'HUB_CLUSTER_NAME', defaultValue: '', description: 'Name of Hub cluster') + string(name:'BASE_DOMAIN', defaultValue: '', description: 'Base domain of Hub cluster') + string(name:'OC_CLUSTER_USER', defaultValue: 'kubeadmin', description: 'OCP Hub User Name') + string(name:'OC_HUB_CLUSTER_PASS', defaultValue: '', description: 'OCP Hub Password') + string(name:'OC_HUB_CLUSTER_API_URL', defaultValue: '', description: 'OCP Hub API URL') + string(name:'MANAGED_CLUSTER_NAME', defaultValue: '', description: 'Managed cluster name') + string(name:'MANAGED_CLUSTER_BASE_DOMAIN', defaultValue: '', description: 'Managed cluster base domain') + string(name:'MANAGED_CLUSTER_USER', defaultValue: 'kubeadmin', description: 'Managed Cluster User Name') + string(name:'MANAGED_CLUSTER_PASS', defaultValue: '', description: 'Managed cluster Password') + string(name:'MANAGED_CLUSTER_API_URL', defaultValue: '', description: 'Managed cluster API URL') + string(name:'BUCKET', defaultValue: 'obs-v1', description: 'Bucket name') + string(name:'REGION', defaultValue: 'us-east-1', description: 'Bucket region') + password(name:'AWS_ACCESS_KEY_ID', defaultValue: '', description: 'AWS access key ID') + password(name:'AWS_SECRET_ACCESS_KEY', defaultValue: '', description: 'AWS secret access key') + string(name:'SKIP_INSTALL_STEP', defaultValue: 'false', description: 'Skip Observability installation') + string(name:'SKIP_UNINSTALL_STEP', defaultValue: 'true', description: 'Skip Observability uninstallation') + 
string(name:'USE_MINIO', defaultValue: 'false', description: 'If no AWS S3 bucket, you could use minio as object storage to instead') + } + environment { + CI = 'true' + AWS_SECRET_ACCESS_KEY = credentials('cqu_aws_secret_access_key') + AWS_ACCESS_KEY_ID = credentials('cqu_aws_access_key') + } + stages { + stage('Test Run') { + steps { + sh """ + export OC_CLUSTER_USER="${params.OC_CLUSTER_USER}" + export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" + export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" + export HUB_CLUSTER_NAME="${params.HUB_CLUSTER_NAME}" + export BASE_DOMAIN="${params.BASE_DOMAIN}" + export MANAGED_CLUSTER_NAME="${params.MANAGED_CLUSTER_NAME}" + export MANAGED_CLUSTER_BASE_DOMAIN="${params.MANAGED_CLUSTER_BASE_DOMAIN}" + export MANAGED_CLUSTER_USER="${params.MANAGED_CLUSTER_USER}" + export MANAGED_CLUSTER_PASS="${params.MANAGED_CLUSTER_PASS}" + export MANAGED_CLUSTER_API_URL="${params.MANAGED_CLUSTER_API_URL}" + export BUCKET="${params.BUCKET}" + export REGION="${params.REGION}" + export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" + export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" + + if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then + export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" + fi + + if [[ -n "${params.AWS_SECRET_ACCESS_KEY}" ]]; then + export AWS_SECRET_ACCESS_KEY="${params.AWS_SECRET_ACCESS_KEY}" + fi + + if [[ "${!params.USE_MINIO}" == false ]]; then + export IS_CANARY_ENV=true + fi + + if [[ -z "${HUB_CLUSTER_NAME}" || -z "${BASE_DOMAIN}" || -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then + echo "Aborting test.. OCP HUB details are required for the test execution" + exit 1 + else + oc login --insecure-skip-tls-verify -u \$MANAGED_CLUSTER_USER -p \$MANAGED_CLUSTER_PASS \$MANAGED_CLUSTER_API_URL + oc config view --minify --raw=true > ~/.kube/managed_kubeconfig + export MAKUBECONFIG=~/.kube/managed_kubeconfig + oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL + export KUBECONFIG=~/.kube/config + go mod vendor && ginkgo build ./tests/pkg/tests/ + cd tests + cp resources/options.yaml.template resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.name="'"\$HUB_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.baseDomain="'"\$BASE_DOMAIN"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.name="'"\$MANAGED_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.baseDomain="'"\$MANAGED_CLUSTER_BASE_DOMAIN"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"\$MAKUBECONFIG"'"' resources/options.yaml + cat resources/options.yaml + ginkgo -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + fi + """ + } + } + + + } + post { + always { + archiveArtifacts artifacts: 'tests/pkg/tests/*.xml', followSymlinks: false + junit 'tests/pkg/tests/*.xml' + } + } +} diff --git a/Jenkinsfile_upgrade b/Jenkinsfile_upgrade new file mode 100644 index 000000000..e4a9a1c71 --- /dev/null +++ b/Jenkinsfile_upgrade @@ -0,0 +1,79 @@ +pipeline { + agent { + docker { + image 'quay.io/rhn_support_abutt/ginkgo_1_14_2-linux-go' + args '--network host -u 0:0' + } + } + parameters { + string(name:'HUB_CLUSTER_NAME', defaultValue: '', description: 'Name of Hub cluster') + string(name:'BASE_DOMAIN', defaultValue: '', description: 'Base domain of Hub cluster') + string(name:'OC_CLUSTER_USER', defaultValue: 'kubeadmin', description: 'OCP 
Hub User Name') + string(name:'OC_HUB_CLUSTER_PASS', defaultValue: '', description: 'OCP Hub Password') + string(name:'OC_HUB_CLUSTER_API_URL', defaultValue: '', description: 'OCP Hub API URL') + string(name:'BUCKET', defaultValue: 'obs-v1', description: 'Bucket name') + string(name:'REGION', defaultValue: 'us-east-1', description: 'Bucket region') + password(name:'AWS_ACCESS_KEY_ID', defaultValue: '', description: 'AWS access key ID') + password(name:'AWS_SECRET_ACCESS_KEY', defaultValue: '', description: 'AWS secret access key') + string(name:'SKIP_INSTALL_STEP', defaultValue: 'false', description: 'Skip Observability installation') + string(name:'SKIP_UNINSTALL_STEP', defaultValue: 'true', description: 'Skip Observability uninstallation') + string(name:'USE_MINIO', defaultValue: 'false', description: 'If no AWS S3 bucket, you could use minio as object storage to instead') + } + environment { + CI = 'true' + AWS_SECRET_ACCESS_KEY = credentials('cqu_aws_secret_access_key') + AWS_ACCESS_KEY_ID = credentials('cqu_aws_access_key') + } + stages { + stage('Test Run') { + steps { + sh """ + export OC_CLUSTER_USER="${params.OC_CLUSTER_USER}" + export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" + export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" + export HUB_CLUSTER_NAME="${params.HUB_CLUSTER_NAME}" + export BASE_DOMAIN="${params.BASE_DOMAIN}" + export BUCKET="${params.BUCKET}" + export REGION="${params.REGION}" + export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" + export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" + + if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then + export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" + fi + + if [[ -n "${params.AWS_SECRET_ACCESS_KEY}" ]]; then + export AWS_SECRET_ACCESS_KEY="${params.AWS_SECRET_ACCESS_KEY}" + fi + + if [[ "${!params.USE_MINIO}" == false ]]; then + export IS_CANARY_ENV=true + fi + + if [[ -z "${HUB_CLUSTER_NAME}" || -z "${BASE_DOMAIN}" || -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then + echo "Aborting test.. OCP HUB details are required for the test execution" + exit 1 + else + oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL + export KUBECONFIG=~/.kube/config + go mod vendor && ginkgo build ./tests/pkg/tests/ + cd tests + cp resources/options.yaml.template resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.name="'"\$HUB_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.baseDomain="'"\$BASE_DOMAIN"'"' resources/options.yaml + cat resources/options.yaml + ginkgo --focus="BVT" -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + fi + """ + } + } + + + } + post { + always { + archiveArtifacts artifacts: 'tests/pkg/tests/*.xml', followSymlinks: false + junit 'tests/pkg/tests/*.xml' + } + } +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
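As an illustration only, the boilerplate from the appendix above, rendered in Go comment syntax as used for source files in this repository (the bracketed placeholders and the package name are kept as illustrative placeholders), might look like:

```go
// Copyright [yyyy] [name of copyright owner]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package name below is illustrative only.
package observability
```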
diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..cc8eada04 --- /dev/null +++ b/Makefile @@ -0,0 +1,58 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +-include /opt/build-harness/Makefile.prow + +# Image URL to use all building/pushing image targets +IMG ?= quay.io/stolostron/multicluster-observability-operator:latest + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: + cd operators/multiclusterobservability && make deploy + +# UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config +undeploy: + cd operators/multiclusterobservability && make undeploy + +# Build the docker image +docker-build: + cd operators/multiclusterobservability && make manager + docker build -t ${IMG} . -f operators/multiclusterobservability/Dockerfile + +# Push the docker image +docker-push: + docker push ${IMG} + +.PHONY: unit-tests +unit-tests: unit-tests-operators unit-tests-loaders unit-tests-proxy unit-tests-collectors + +unit-tests-operators: + go test `go list ./operators/... | grep -v test` + +unit-tests-loaders: + go test `go list ./loaders/... | grep -v test` + +unit-tests-proxy: + go test `go list ./proxy/... | grep -v test` + +unit-tests-collectors: + go test `go list ./collectors/... | grep -v test` + +.PHONY: e2e-tests +e2e-tests: + @echo "Running e2e tests ..." + @./cicd-scripts/run-e2e-tests.sh + +.PHONY: e2e-tests-in-kind +e2e-tests-in-kind: + @echo "Running e2e tests in KinD cluster..." +ifeq ($(OPENSHIFT_CI),true) + @./cicd-scripts/run-e2e-in-kind-via-prow.sh +else + @./tests/run-in-kind/run-e2e-in-kind.sh +endif + +# Generate bundle manifests and metadata, then validate generated files. +.PHONY: bundle +bundle: + cd operators/multiclusterobservability && make bundle diff --git a/OWNERS b/OWNERS new file mode 100644 index 000000000..519e50c83 --- /dev/null +++ b/OWNERS @@ -0,0 +1,9 @@ +approvers: +- clyang82 +- marcolan018 +- morvencao +- songleo + +reviewers: +- haoqing0110 +- bjoydeep diff --git a/REMEDIATE.md b/REMEDIATE.md new file mode 100644 index 000000000..80ac8f84f --- /dev/null +++ b/REMEDIATE.md @@ -0,0 +1,5 @@ +## This file changes will trigger rebuild and publish all images generated from this repo. + +### Tue Sep 7 10:46:16 CST 2021 + +https://github.com/stolostron/backlog/issues/15853 diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..1c6e25c9b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,4 @@ +# Security Response + +If you've found a security issue that you'd like to disclose confidentially please contact Red Hat's Product Security team. 
+Details at https://access.redhat.com/security/team/contact diff --git a/cicd-scripts/Configfile b/cicd-scripts/Configfile new file mode 100644 index 000000000..c75763ff2 --- /dev/null +++ b/cicd-scripts/Configfile @@ -0,0 +1,30 @@ +ifdef GIT +IMAGE_VERSION :=$(shell git rev-parse --short HEAD) +VCS_URL ?=$(shell git config --get remote.origin.url) +endif + +IMAGE_NAME =$(shell cat COMPONENT_NAME) +IMAGE_DISPLAY_NAME =Multicluster Monitoring Operator +ARCH = $(shell uname -m) +ifeq ($(ARCH), x86_64) + IMAGE_NAME_ARCH =$(IMAGE_NAME)-amd64 +else + IMAGE_NAME_ARCH =$(IMAGE_NAME)-$(ARCH) +endif +IMAGE_MAINTAINER =acm-contact@redhat.com +IMAGE_VENDOR =Red Hat +IMAGE_DESCRIPTION =Multicluster Monitoring Services +IMAGE_SUMMARY =$(IMAGE_DESCRIPTION) +IMAGE_OPENSHIFT_TAGS =Multicluster Monitoring + +DOCKER_BUILD_OPTS=--build-arg "VCS_REF=$(SEMVERSION)" \ + --build-arg "VCS_URL=$(VCS_URL)" \ + --build-arg "IMAGE_NAME=$(IMAGE_NAME)" \ + --build-arg "IMAGE_DISPLAY_NAME=$(IMAGE_DISPLAY_NAME)" \ + --build-arg "IMAGE_NAME_ARCH=$(IMAGE_NAME_ARCH)" \ + --build-arg "IMAGE_MAINTAINER=$(IMAGE_MAINTAINER)" \ + --build-arg "IMAGE_VENDOR=$(IMAGE_VENDOR)" \ + --build-arg "IMAGE_VERSION=$(IMAGE_VERSION)" \ + --build-arg "IMAGE_DESCRIPTION=$(IMAGE_DESCRIPTION)" \ + --build-arg "IMAGE_SUMMARY=$(IMAGE_SUMMARY)" \ + --build-arg "IMAGE_OPENSHIFT_TAGS=$(IMAGE_OPENSHIFT_TAGS)" \ No newline at end of file diff --git a/cicd-scripts/build.sh b/cicd-scripts/build.sh new file mode 100755 index 000000000..591ef8d36 --- /dev/null +++ b/cicd-scripts/build.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +set -e + +make docker-binary + +git config --global url."https://$GITHUB_TOKEN@github.com/stolostron".insteadOf "https://github.com/stolostron" + +echo "Building multicluster-observability-operator image" +export DOCKER_IMAGE_AND_TAG=${1} +export DOCKER_FILE=Dockerfile +make docker/build \ No newline at end of file diff --git a/cicd-scripts/copyright-check.sh b/cicd-scripts/copyright-check.sh new file mode 100755 index 000000000..fea69b260 --- /dev/null +++ b/cicd-scripts/copyright-check.sh @@ -0,0 +1,168 @@ +#!/bin/bash +############################################################################### +# (c) Copyright IBM Corporation 2019, 2020. All Rights Reserved. +# Note to U.S. Government Users Restricted Rights: +# U.S. Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +# Licensed Materials - Property of IBM +# Copyright (c) 2021 Red Hat, Inc. 
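+# copyright-check.sh scans the first 15 lines of each candidate source file
+# (selected file types only; paths matching .copyrightignore are skipped):
+# files added since 1 Mar 2020 must carry the Red Hat copyright line, files
+# modified since then must carry both the Red Hat and IBM lines, and older
+# files must carry the IBM lines. It exits non-zero if any expected line is missing.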
+# Copyright Contributors to the Open Cluster Management project +############################################################################### + +#Project start year +origin_year=2016 +#Back up year if system time is null or incorrect +back_up_year=2019 +#Currrent year +current_year=$(date +"%Y") + +TRAVIS_BRANCH=$1 + +ADDED_SINCE_1_MAR_2020=$(git log --name-status --pretty=oneline --since "1 Mar 2020" | egrep "^A\t" | awk '{print $2}' | sort | uniq | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore)) +MODIFIED_SINCE_1_MAR_2020=$(diff --new-line-format="" --unchanged-line-format="" <(git log --name-status --pretty=oneline --since "1 Mar 2020" | egrep "^A\t|^M\t" | awk '{print $2}' | sort | uniq | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore)) <(git log --name-status --pretty=oneline --since "1 Mar 2020" | egrep "^A\t" | awk '{print $2}' | sort | uniq | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore))) +OLDER_GIT_FILES=$(git log --name-status --pretty=oneline | egrep "^A\t|^M\t" | awk '{print $2}' | sort | uniq | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore)) + +if [[ "x${TRAVIS_BRANCH}" != "x" ]]; then + FILES_TO_SCAN=$(git diff --name-only --diff-filter=AM ${TRAVIS_BRANCH}...HEAD | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore)) +else + FILES_TO_SCAN=$(find . -type f | grep -Ev '(\.git)' | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore)) +fi + +if [ -z "$current_year" ] || [ $current_year -lt $origin_year ]; then + echo "Can't get correct system time\n >>Use back_up_year=$back_up_year as current_year to check copyright in the file $f\n" + current_year=$back_up_year +fi + +lic_ibm_identifier=" (c) Copyright IBM Corporation" +lic_redhat_identifier=" Copyright (c) ${current_year} Red Hat, Inc." + +lic_year=() +#All possible combination within [origin_year, current_year] range is valid format +#seq isn't recommanded after bash version 3.0 +for ((start_year=origin_year;start_year<=current_year;start_year++)); +do + lic_year+=(" (c) Copyright IBM Corporation ${start_year}. All Rights Reserved.") + for ((end_year=start_year+1;end_year<=current_year;end_year++)); + do + lic_year+=(" (c) Copyright IBM Corporation ${start_year}, ${end_year}. All Rights Reserved.") + done +done +lic_year_size=${#lic_year[@]} + +#lic_rest to scan for rest copyright format's correctness +lic_rest=() +lic_rest+=(" Licensed Materials - Property of IBM") +lic_rest+=(" Note to U.S. Government Users Restricted Rights:") +lic_rest+=(" Use, duplication or disclosure restricted by GSA ADP Schedule") +lic_rest+=(" Contract with IBM Corp.") +lic_rest_size=${#lic_rest[@]} + +#Used to signal an exit +ERROR=0 +RETURNCODE=0 + +echo "##### Copyright check #####" +#Loop through all files. Ignore .FILENAME types +#for f in `find .. -type f ! -path "../.eslintrc.js" ! -path "../build-harness/*" ! -path "../auth-setup/*" ! -path "../sslcert/*" ! -path "../node_modules/*" ! -path "../coverage/*" ! -path "../test-output/*" ! -path "../build/*" ! -path "../nls/*" ! -path "../public/*"`; do +for f in $FILES_TO_SCAN; do + if [ ! 
-f "$f" ]; then + continue + fi + + # Flags that indicate the licenses to check for + must_have_redhat_license=false + must_have_ibm_license=false + flag_redhat_license=false + flag_ibm_license=false + + FILETYPE=$(basename ${f##*.}) + case "${FILETYPE}" in + js | go | scss | properties | java | rb | sh ) + COMMENT_PREFIX="" + ;; + *) + #printf " Extension $FILETYPE not considered !!!\n" + continue + esac + + #Read the first 15 lines, most Copyright headers use the first 10 lines. + header=`head -15 $f` + + # Strip directory prefix, if any + if [[ $f == "./"* ]]; then + f=${f:2} + fi + + printf " ========>>>>>> Scanning $f . . .\n" + if [[ "${ADDED_SINCE_1_MAR_2020}" == *"$f"* ]]; then + printf " ---> Added since 01/03/2020\n" + must_have_redhat_license=true + flag_ibm_license=true + elif [[ "${MODIFIED_SINCE_1_MAR_2020}" == *"$f"* ]]; then + printf " ---> Modified since 01/03/2020\n" + must_have_redhat_license=true + must_have_ibm_license=true + elif [[ "${OLDER_GIT_FILES}" == *"$f"* ]]; then + printf " ---> File older than 01/03/2020\n" + must_have_ibm_license=true + flag_redhat_license=true + else + # Default case, could be new file not yet in git(?) - only expect Red Hat license + must_have_redhat_license=true + fi + + if [[ "${must_have_redhat_license}" == "true" ]] && [[ "$header" != *"${lic_redhat_identifier}"* ]]; then + printf " Missing copyright\n >> Could not find [${lic_redhat_identifier}] in the file.\n" + ERROR=1 + fi + + if [[ "${flag_redhat_license}" == "true" ]] && [[ "$header" == *"${lic_redhat_identifier}"* ]]; then + printf " Warning: Older file, may not include Red Hat license.\n" + fi + + if [[ "${flag_ibm_license}" == "true" ]] && [[ "$header" == *"${lic_ibm_identifier}"* ]]; then + printf " Warning: newer file, may not contain IBM license.\n" + fi + + if [[ "${must_have_ibm_license}" == "true" ]]; then + # Verify IBM copyright is present + #Check for year copyright single line + year_line_count=0 + for ((i=0;i<${lic_year_size};i++)); + do + #Validate year formart within [origin_year, current_year] range + if [[ "$header" == *"${lic_year[$i]}"* ]]; then + year_line_count=$((year_line_count + 1)) + fi + done + + #Must find and only find one line valid year, otherwise invalid copyright formart + if [[ $year_line_count != 1 ]]; then + printf "Missing copyright\n >>Could not find correct copyright year in the file $f\n" + ERROR=1 + #break + fi + + #Check for rest copyright lines + for ((i=0;i<${lic_rest_size};i++)); + do + #Validate the copyright line being checked is present + if [[ "$header" != *"${lic_rest[$i]}"* ]]; then + printf "Missing copyright\n >>Could not find [${lic_rest[$i]}] in the file $f\n" + ERROR=1 + #break 2 + fi + done + fi # end must_have_ibm_license + + #Add a status message of OK, if all copyright lines are found + if [[ "$ERROR" == 0 ]]; then + printf "OK\n" + else + RETURNCODE=$ERROR + ERROR=0 # Reset error + fi +done + +echo "##### Copyright check ##### ReturnCode: ${RETURNCODE}" +exit $RETURNCODE diff --git a/cicd-scripts/customize-mco.sh b/cicd-scripts/customize-mco.sh new file mode 100755 index 000000000..427b4e4de --- /dev/null +++ b/cicd-scripts/customize-mco.sh @@ -0,0 +1,201 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +set -exo pipefail + +ROOTDIR="$(cd "$(dirname "$0")/.." 
; pwd -P)" +export PATH=${PATH}:${ROOTDIR}/bin + +if [[ "$(uname)" == "Linux" ]]; then + SED_COMMAND='sed -i-e -e' +elif [[ "$(uname)" == "Darwin" ]]; then + SED_COMMAND='sed -i '-e' -e' +fi + +# Use snapshot for target release. Use latest one if no branch info detected, or not a release branch +BRANCH="" +LATEST_SNAPSHOT="" +if [[ "${PULL_BASE_REF}" == "release-"* ]]; then + BRANCH=${PULL_BASE_REF#"release-"} + BRANCH=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'")))|keys[length-1]' | awk -F '-' '{print $1}') + BRANCH="${BRANCH#\"}" + LATEST_SNAPSHOT=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'-SNAPSHOT")))|keys[length-1]') +fi +if [[ "${LATEST_SNAPSHOT}" == "null" ]] || [[ "${LATEST_SNAPSHOT}" == "" ]]; then + LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("SNAPSHOT")))|keys[length-1]') +fi + +# trim the leading and tailing quotes +LATEST_SNAPSHOT="${LATEST_SNAPSHOT#\"}" +LATEST_SNAPSHOT="${LATEST_SNAPSHOT%\"}" + +# list all components need to do test. +CHANGED_COMPONENTS="" +GINKGO_FOCUS="" +IMAGE="" + +update_mco_cr() { + if [ "${OPENSHIFT_CI}" == "true" ]; then + # discard unstaged changes + cd ${ROOTDIR} && git checkout -- . + for component_name in ${CHANGED_COMPONENTS}; do + component_anno_name=$(echo ${component_name} | sed 's/-/_/g') + get_image ${component_name} + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-${component_anno_name}-image: ${IMAGE}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-${component_anno_name}-image: ${IMAGE}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + done + else + if [[ -n "${RBAC_QUERY_PROXY_IMAGE_REF}" ]]; then + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-rbac_query_proxy-image: ${RBAC_QUERY_PROXY_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-rbac_query_proxy-image: ${RBAC_QUERY_PROXY_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + fi + if [[ -n "${ENDPOINT_MONITORING_OPERATOR_IMAGE_REF}" ]]; then + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-endpoint_monitoring_operator-image: ${ENDPOINT_MONITORING_OPERATOR_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-endpoint_monitoring_operator-image: ${ENDPOINT_MONITORING_OPERATOR_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + fi + if [[ -n "${GRAFANA_DASHBOARD_LOADER_IMAGE_REF}" ]]; then + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-grafana_dashboard_loader-image: ${GRAFANA_DASHBOARD_LOADER_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-grafana_dashboard_loader-image: ${GRAFANA_DASHBOARD_LOADER_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + fi + if [[ -n "${METRICS_COLLECTOR_IMAGE_REF}" ]]; then + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-metrics_collector-image: ${METRICS_COLLECTOR_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-metrics_collector-image: ${METRICS_COLLECTOR_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + fi + if [[ -n "${OBSERVATORIUM_OPERATOR_IMAGE_REF}" ]]; then + 
${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-observatorium_operator-image: ${OBSERVATORIUM_OPERATOR_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-metrics_collector-image: ${OBSERVATORIUM_OPERATOR_IMAGE_REF}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + fi + fi + + # Add mco-imageTagSuffix annotation + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-imageTagSuffix: ${LATEST_SNAPSHOT}" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-imageTagSuffix: ${LATEST_SNAPSHOT}" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + + # need to add this annotation due to KinD cluster resources are insufficient + if [[ -n "${IS_KIND_ENV}" ]]; then + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-thanos-without-resources-requests: true" ${ROOTDIR}/examples/mco/e2e/v1beta1/observability.yaml + ${SED_COMMAND} "/annotations.*/a \ \ \ \ mco-thanos-without-resources-requests: true" ${ROOTDIR}/examples/mco/e2e/v1beta2/observability.yaml + fi +} + +get_image() { + if [[ $1 = "rbac-query-proxy" ]]; then + IMAGE="${RBAC_QUERY_PROXY_IMAGE_REF}" + fi + if [[ $1 = "endpoint-monitoring-operator" ]]; then + IMAGE="${ENDPOINT_MONITORING_OPERATOR_IMAGE_REF}" + fi + if [[ $1 = "grafana-dashboard-loader" ]]; then + IMAGE="${GRAFANA_DASHBOARD_LOADER_IMAGE_REF}" + fi + if [[ $1 = "metrics-collector" ]]; then + IMAGE="${METRICS_COLLECTOR_IMAGE_REF}" + fi +} + +# function get_changed_components is used to get the component used to test +# get_changed_components is to get the component name based on the changes in your PR +get_changed_components() { + if [ "${OPENSHIFT_CI}" == "true" ]; then + changed_files=$(cd ${ROOTDIR}; git diff --name-only HEAD~1) + for file in ${changed_files}; do + if [[ ${file} =~ ^proxy ]]; then + CHANGED_COMPONENTS+=" rbac-query-proxy" + continue + fi + if [[ ${file} =~ ^operators/endpointmetrics || ${file} =~ ^operators/pkg ]]; then + CHANGED_COMPONENTS+=" endpoint-monitoring-operator" + continue + fi + if [[ ${file} =~ ^loaders/dashboards ]]; then + CHANGED_COMPONENTS+=" grafana-dashboard-loader" + continue + fi + if [[ ${file} =~ ^collectors/metrics ]]; then + CHANGED_COMPONENTS+=" metrics-collector" + continue + fi + if [[ ${file} =~ ^pkg ]]; then + CHANGED_COMPONENTS="rbac-query-proxy metrics-collector endpoint-monitoring-operator grafana-dashboard-loader" + break + fi + done + fi + # remove duplicates + CHANGED_COMPONENTS=$(echo "${CHANGED_COMPONENTS}" | xargs -n1 | sort -u | xargs) + echo "Tested components are ${CHANGED_COMPONENTS}" +} + +# function get_ginkgo_focus is to get the required cases +get_ginkgo_focus() { + if [ "${OPENSHIFT_CI}" == "true" ]; then + changed_files=$(cd $ROOTDIR; git diff --name-only HEAD~1) + for file in ${changed_files}; do + if [[ ${file} =~ ^proxy ]]; then + GINKGO_FOCUS+=" --focus grafana/g0 --focus metrics/g0" + continue + fi + if [[ ${file} =~ ^collectors/metrics ]]; then + GINKGO_FOCUS+=" --focus grafana/g0 --focus metrics/g0 --focus addon/g0" + continue + fi + if [[ ${file} =~ ^operators/endpointmetrics ]]; then + GINKGO_FOCUS+=" --focus grafana/g0 --focus metrics/g0 --focus addon/g0 --focus endpoint_preserve/g0" + continue + fi + if [[ ${file} =~ ^loaders/dashboards ]]; then + GINKGO_FOCUS+=" --focus grafana/g0 --focus metrics/g0 --focus addon/g0" + continue + fi + if [[ $file =~ ^operators/multiclusterobservability ]]; then + GINKGO_FOCUS+=" --focus addon/g0 --focus config/g0 --focus alert/g0 --focus alertforward/g0 --focus 
certrenew/g0 --focus grafana/g0 --focus grafana_dev/g0 --focus dashboard/g0 --focus manifestwork/g0 --focus metrics/g0 --focus observatorium_preserve/g0 --focus reconcile/g0 --focus retention/g0" + continue + fi + if [[ $file =~ ^operators/pkg ]]; then + GINKGO_FOCUS+=" --focus addon/g0 --focus config/g0 --focus alert/g0 --focus alertforward/g0 --focus certrenew/g0 --focus grafana/g0 --focus grafana_dev/g0 --focus dashboard/g0 --focus manifestwork/g0 --focus metrics/g0 --focus observatorium_preserve/g0 --focus reconcile/g0 --focus retention/g0 --focus endpoint_preserve/g0" + continue + fi + if [[ ${file} =~ ^pkg ]]; then + # test all cases + GINKGO_FOCUS="" + break + fi + if [[ $file =~ ^examples/alerts ]]; then + GINKGO_FOCUS+=" --focus alert/g0 --focus alertforward/g0" + continue + fi + if [[ ${file} =~ ^examples/dashboards ]]; then + GINKGO_FOCUS+=" --focus dashboard/g0" + continue + fi + if [[ ${file} =~ ^examples/metrics ]]; then + GINKGO_FOCUS+=" --focus metrics/g0" + continue + fi + if [[ ${file} =~ ^tests ]]; then + GINKGO_FOCUS+=" --focus $(echo ${file} | cut -d '/' -f4 | sed -En 's/observability_(.*)_test.go/\1/p')/g0" + continue + fi + if [[ ${file} =~ ^tools ]]; then + GINKGO_FOCUS+=" --focus grafana_dev/g0" + continue + fi + done + fi + + if [[ -n "${IS_KIND_ENV}" ]]; then + # For KinD cluster, do not need to run all test cases + GINKGO_FOCUS=" --focus manifestwork/g0 --focus endpoint_preserve/g0 --focus grafana/g0 --focus metrics/g0 --focus addon/g0 --focus alert/g0 --focus dashboard/g0" + else + GINKGO_FOCUS=$(echo "${GINKGO_FOCUS}" | xargs -n2 | sort -u | xargs) + fi + echo "Test focuses are ${GINKGO_FOCUS}" +} + +# start executing +get_changed_components +update_mco_cr +get_ginkgo_focus +echo "${GINKGO_FOCUS}" > /tmp/ginkgo_focus diff --git a/cicd-scripts/deploy-to-cluster.sh b/cicd-scripts/deploy-to-cluster.sh new file mode 100755 index 000000000..6ede6dcc8 --- /dev/null +++ b/cicd-scripts/deploy-to-cluster.sh @@ -0,0 +1,4 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +echo "DEPLOY TO CLUSTER GOES HERE! IT SHOULD DO NOTHING RIGHT NOW!" \ No newline at end of file diff --git a/cicd-scripts/install-dependencies.sh b/cicd-scripts/install-dependencies.sh new file mode 100755 index 000000000..344c7ab45 --- /dev/null +++ b/cicd-scripts/install-dependencies.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +echo "install dependencies" + +_OPERATOR_SDK_VERSION=v1.4.2 + +if ! [ -x "$(command -v operator-sdk)" ]; then + if [[ "$OSTYPE" == "linux-gnu" ]]; then + curl -L https://github.com/operator-framework/operator-sdk/releases/download/${_OPERATOR_SDK_VERSION}/operator-sdk_linux_amd64 -o operator-sdk + elif [[ "$OSTYPE" == "darwin"* ]]; then + curl -L https://github.com/operator-framework/operator-sdk/releases/download/${_OPERATOR_SDK_VERSION}/operator-sdk_darwin_amd64 -o operator-sdk + fi + chmod +x operator-sdk + sudo mv operator-sdk /usr/local/bin/operator-sdk +fi diff --git a/cicd-scripts/run-e2e-in-kind-via-prow.sh b/cicd-scripts/run-e2e-in-kind-via-prow.sh new file mode 100755 index 000000000..d641ea16e --- /dev/null +++ b/cicd-scripts/run-e2e-in-kind-via-prow.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021 Red Hat, Inc. 
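+# This wrapper runs the KinD e2e suite from Prow: the image refs supplied by
+# the CI job are appended as exports to tests/run-in-kind/env.sh, then the
+# repository is copied over SSH to the provisioned EC2 host, where
+# tests/run-in-kind/run-e2e-in-kind.sh is executed and its log is archived.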
+# Copyright Contributors to the Open Cluster Management project + +set -euxo pipefail + +KEY="${SHARED_DIR}/private.pem" +chmod 400 "${KEY}" + +IP="$(cat "${SHARED_DIR}/public_ip")" +HOST="ec2-user@${IP}" +OPT=(-q -o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no" -i "${KEY}") + +# support gnu sed only give that this script will be executed in prow env +SED_COMMAND='sed -i-e -e' + +if [ "${OPENSHIFT_CI}" == "true" ]; then + ${SED_COMMAND} "$ a\export OPENSHIFT_CI=${OPENSHIFT_CI}" ./tests/run-in-kind/env.sh +fi + +if [[ -n "${PULL_BASE_REF}" ]]; then + ${SED_COMMAND} "$ a\export PULL_BASE_REF=${PULL_BASE_REF}" ./tests/run-in-kind/env.sh +fi + +if [[ -n "${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF}" ]]; then + ${SED_COMMAND} "$ a\export MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF=${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF}" ./tests/run-in-kind/env.sh +fi +if [[ -n "${ENDPOINT_MONITORING_OPERATOR_IMAGE_REF}" ]]; then + ${SED_COMMAND} "$ a\export ENDPOINT_MONITORING_OPERATOR_IMAGE_REF=${ENDPOINT_MONITORING_OPERATOR_IMAGE_REF}" ./tests/run-in-kind/env.sh +fi +if [[ -n "${GRAFANA_DASHBOARD_LOADER_IMAGE_REF}" ]]; then + ${SED_COMMAND} "$ a\export GRAFANA_DASHBOARD_LOADER_IMAGE_REF=${GRAFANA_DASHBOARD_LOADER_IMAGE_REF}" ./tests/run-in-kind/env.sh +fi +if [[ -n "${METRICS_COLLECTOR_IMAGE_REF}" ]]; then + ${SED_COMMAND} "$ a\export METRICS_COLLECTOR_IMAGE_REF=${METRICS_COLLECTOR_IMAGE_REF}" ./tests/run-in-kind/env.sh +fi +if [[ -n "${RBAC_QUERY_PROXY_IMAGE_REF}" ]]; then + ${SED_COMMAND} "$ a\export RBAC_QUERY_PROXY_IMAGE_REF=${RBAC_QUERY_PROXY_IMAGE_REF}" ./tests/run-in-kind/env.sh +fi + +ssh "${OPT[@]}" "$HOST" sudo yum install gcc git -y +scp "${OPT[@]}" -r ../multicluster-observability-operator "$HOST:/tmp/multicluster-observability-operator" +ssh "${OPT[@]}" "$HOST" "cd /tmp/multicluster-observability-operator/tests/run-in-kind && ./run-e2e-in-kind.sh" > >(tee "$ARTIFACT_DIR/run-e2e-in-kind.log") 2>&1 diff --git a/cicd-scripts/run-e2e-tests.sh b/cicd-scripts/run-e2e-tests.sh new file mode 100755 index 000000000..b2e1da06d --- /dev/null +++ b/cicd-scripts/run-e2e-tests.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +set -exo pipefail + +ROOTDIR="$(cd "$(dirname "$0")/.." ; pwd -P)" + +SED_COMMAND='sed -i -e' +if [[ "$(uname)" == "Darwin" ]]; then + SED_COMMAND='sed -i '-e' -e' +fi + +# customize the images for testing +${ROOTDIR}/cicd-scripts/customize-mco.sh +GINKGO_FOCUS="$(cat /tmp/ginkgo_focus)" + +# need to modify sc for KinD +if [[ -n "${IS_KIND_ENV}" ]]; then + ${SED_COMMAND} "s~gp2$~standard~g" ${ROOTDIR}/examples/minio/minio-pvc.yaml +fi + +kubeconfig_hub_path="" +if [ ! -z "${SHARED_DIR}" ]; then + export KUBECONFIG="${SHARED_DIR}/hub-1.kc" + kubeconfig_hub_path="${SHARED_DIR}/hub-1.kc" +else + # for local testing + if [ -z "${KUBECONFIG}" ]; then + echo "Error: environment variable KUBECONFIG must be specified!" 
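+        # the hub kubeconfig is needed to generate options.yaml and reach the cluster, so abort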
+ exit 1 + fi + kubeconfig_hub_path="${HOME}/.kube/kubeconfig-hub" + kubectl config view --raw --minify > ${kubeconfig_hub_path} +fi + +kubecontext=$(kubectl config current-context) +cluster_name="local-cluster" + +if [[ -n "${IS_KIND_ENV}" ]]; then + clusterServerURL="https://127.0.0.1:32806" + base_domain="placeholder" +else + clusterServerURL=$(kubectl config view -o jsonpath="{.clusters[0].cluster.server}") + app_domain=$(kubectl -n openshift-ingress-operator get ingresscontrollers default -ojsonpath='{.status.domain}') + base_domain="${app_domain#apps.}" + kubectl apply -f ${ROOTDIR}/operators/multiclusterobservability/config/crd/bases +fi + +OPTIONSFILE=${ROOTDIR}/tests/resources/options.yaml +# remove the options file if it exists +rm -f ${OPTIONSFILE} + +printf "options:" >> ${OPTIONSFILE} +printf "\n kubeconfig: ${kubeconfig_hub_path}" >> ${OPTIONSFILE} +printf "\n hub:" >> ${OPTIONSFILE} +printf "\n clusterServerURL: ${clusterServerURL}" >> ${OPTIONSFILE} +printf "\n kubeconfig: ${kubeconfig_hub_path}" >> ${OPTIONSFILE} +printf "\n kubecontext: ${kubecontext}" >> ${OPTIONSFILE} +printf "\n baseDomain: ${base_domain}" >> ${OPTIONSFILE} +if [[ -n "${IS_KIND_ENV}" ]]; then + printf "\n grafanaURL: http://127.0.0.1:31001" >> ${OPTIONSFILE} + printf "\n grafanaHost: grafana-test" >> ${OPTIONSFILE} +fi +printf "\n clusters:" >> ${OPTIONSFILE} +printf "\n - name: ${cluster_name}" >> ${OPTIONSFILE} +if [[ -n "${IS_KIND_ENV}" ]]; then + printf "\n clusterServerURL: ${clusterServerURL}" >> ${OPTIONSFILE} +fi +printf "\n baseDomain: ${base_domain}" >> ${OPTIONSFILE} +printf "\n kubeconfig: ${kubeconfig_hub_path}" >> ${OPTIONSFILE} +printf "\n kubecontext: ${kubecontext}" >> ${OPTIONSFILE} + +if command -v ginkgo &> /dev/null; then + GINKGO_CMD=ginkgo +else + # just for Prow KinD vm + go install github.com/onsi/ginkgo/ginkgo@latest + GINKGO_CMD="$(go env GOPATH)/bin/ginkgo" +fi +${GINKGO_CMD} -debug -trace ${GINKGO_FOCUS} -v ${ROOTDIR}/tests/pkg/tests -- -options=${OPTIONSFILE} -v=3 + +cat ${ROOTDIR}/tests/pkg/tests/results.xml | grep failures=\"0\" | grep errors=\"0\" +if [ $? -ne 0 ]; then + echo "Cannot pass all test cases." + cat ${ROOTDIR}/tests/pkg/tests/results.xml + exit 1 +fi diff --git a/cicd-scripts/run-unit-tests.sh b/cicd-scripts/run-unit-tests.sh new file mode 100755 index 000000000..66648121e --- /dev/null +++ b/cicd-scripts/run-unit-tests.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +echo "/: : $1" + +git config --global url."https://$GITHUB_TOKEN@github.com/stolostron".insteadOf "https://github.com/stolostron" + +go test ./... \ No newline at end of file diff --git a/cicd-scripts/setup-e2e-tests.sh b/cicd-scripts/setup-e2e-tests.sh new file mode 100755 index 000000000..8fa2f5ebc --- /dev/null +++ b/cicd-scripts/setup-e2e-tests.sh @@ -0,0 +1,275 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +# Required KUBECONFIG environment variable to run this script: + +set -exo pipefail + +if [[ -z "${KUBECONFIG}" ]]; then + echo "Error: environment variable KUBECONFIG must be specified!" + exit 1 +fi + +ROOTDIR="$(cd "$(dirname "$0")/.." 
; pwd -P)" +# Create bin directory and add it to PATH +mkdir -p ${ROOTDIR}/bin +export PATH=${PATH}:${ROOTDIR}/bin + +OCM_DEFAULT_NS="open-cluster-management" +AGENT_NS="open-cluster-management-agent" +HUB_NS="open-cluster-management-hub" +OBSERVABILITY_NS="open-cluster-management-observability" +IMAGE_REPO="quay.io/open-cluster-management" +export MANAGED_CLUSTER="local-cluster" # registration-operator needs this + +SED_COMMAND='sed -i-e -e' +if [[ "$(uname)" == "Darwin" ]]; then + SED_COMMAND='sed -i '-e' -e' +fi + +# install jq +if ! command -v jq &> /dev/null; then + if [[ "$(uname)" == "Linux" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + elif [[ "$(uname)" == "Darwin" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 + fi + chmod +x ./jq && mv ./jq ${ROOTDIR}/bin/jq +fi + +# Use snapshot for target release. Use latest one if no branch info detected, or not a release branch +BRANCH="" +LATEST_SNAPSHOT="" +if [[ "${PULL_BASE_REF}" == "release-"* ]]; then + BRANCH=${PULL_BASE_REF#"release-"} + BRANCH=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'")))|keys[length-1]' | awk -F '-' '{print $1}') + BRANCH="${BRANCH#\"}" + LATEST_SNAPSHOT=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'-SNAPSHOT")))|keys[length-1]') +fi +if [[ "${LATEST_SNAPSHOT}" == "null" ]] || [[ "${LATEST_SNAPSHOT}" == "" ]]; then + LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("SNAPSHOT")))|keys[length-1]') +fi + +# trim the leading and tailing quotes +LATEST_SNAPSHOT="${LATEST_SNAPSHOT#\"}" +LATEST_SNAPSHOT="${LATEST_SNAPSHOT%\"}" + +# install kubectl +if ! command -v kubectl &> /dev/null; then + echo "This script will install kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your machine" + if [[ "$(uname)" == "Linux" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + elif [[ "$(uname)" == "Darwin" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl + fi + chmod +x ./kubectl && mv ./kubectl ${ROOTDIR}/bin/kubectl +fi + +# install kustomize +if ! 
command -v kustomize &> /dev/null; then
+  echo "This script will install kustomize (sigs.k8s.io/kustomize/kustomize) on your machine"
+  if [[ "$(uname)" == "Linux" ]]; then
+    curl -o kustomize_v3.8.7.tar.gz -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv3.8.7/kustomize_v3.8.7_linux_amd64.tar.gz
+  elif [[ "$(uname)" == "Darwin" ]]; then
+    curl -o kustomize_v3.8.7.tar.gz -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv3.8.7/kustomize_v3.8.7_darwin_amd64.tar.gz
+  fi
+  tar xzvf kustomize_v3.8.7.tar.gz
+  chmod +x ./kustomize && mv ./kustomize ${ROOTDIR}/bin/kustomize
+fi
+
+# deploy the hub and spoke core via OLM
+deploy_hub_spoke_core() {
+  cd ${ROOTDIR}
+  if [[ -d "registration-operator" ]]; then
+    rm -rf registration-operator
+  fi
+  git clone --depth 1 -b release-2.4 https://github.com/stolostron/registration-operator.git && cd registration-operator
+  ${SED_COMMAND} "s~clusterName: cluster1$~clusterName: ${MANAGED_CLUSTER}~g" deploy/klusterlet/config/samples/operator_open-cluster-management_klusterlets.cr.yaml
+  # deploy hub and spoke via OLM
+  make cluster-ip
+  make deploy
+
+  # wait until hub and spoke are ready
+  wait_for_deployment_ready 10 60s ${HUB_NS} cluster-manager-registration-controller cluster-manager-registration-webhook cluster-manager-work-webhook
+  wait_for_deployment_ready 10 60s ${AGENT_NS} klusterlet-registration-agent klusterlet-work-agent
+}
+
+# approve the CSR for the cluster join request
+approve_csr_joinrequest() {
+  echo "waiting for the CSR for the cluster join request to be created..."
+  for i in {1..60}; do
+    # TODO(morvencao): remove the hard-coded cluster label
+    csrs=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER})
+    if [[ ! -z ${csrs} ]]; then
+      csrnames=$(kubectl get csr -lopen-cluster-management.io/cluster-name=${MANAGED_CLUSTER} -o jsonpath={.items..metadata.name})
+      for csrname in ${csrnames}; do
+        echo "approving CSR: ${csrname}"
+        kubectl certificate approve ${csrname}
+      done
+      break
+    fi
+    if [[ ${i} -eq 60 ]]; then
+      echo "timed out waiting for the CSR to be created."
+      exit 1
+    fi
+    echo "retrying in 10s..."
+    sleep 10
+  done
+
+  for i in {1..20}; do
+    clusters=$(kubectl get managedcluster)
+    if [[ ! -z ${clusters} ]]; then
+      clusternames=$(kubectl get managedcluster -o jsonpath={.items..metadata.name})
+      for clustername in ${clusternames}; do
+        echo "approving the join request for ${clustername}"
+        kubectl patch managedcluster ${clustername} --patch '{"spec":{"hubAcceptsClient":true}}' --type=merge
+        if [[ -n "${IS_KIND_ENV}" ]]; then
+          # update the vendor label for the KinD env
+          kubectl label managedcluster ${clustername} vendor-
+          kubectl label managedcluster ${clustername} vendor=GKE
+        fi
+      done
+      break
+    fi
+    if [[ ${i} -eq 20 ]]; then
+      echo "timed out waiting for the managedcluster to be created."
+      exit 1
+    fi
+    echo "retrying in 10s..."
+    sleep 10
+  done
+}
+
+# deploy grafana-test so the dashboards can be checked from a browser
+deploy_grafana_test() {
+  cd ${ROOTDIR}
+  ${SED_COMMAND} "s~name: grafana$~name: grafana-test~g; s~app: multicluster-observability-grafana$~app: multicluster-observability-grafana-test~g; s~secretName: grafana-config$~secretName: grafana-config-test~g; s~secretName: grafana-datasources$~secretName: grafana-datasources-test~g; /MULTICLUSTEROBSERVABILITY_CR_NAME/d" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml
+  ${SED_COMMAND} "s~image: quay.io/stolostron/grafana-dashboard-loader.*$~image: ${IMAGE_REPO}/grafana-dashboard-loader:${LATEST_SNAPSHOT}~g" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml
+  ${SED_COMMAND} "s~replicas: 2$~replicas: 1~g" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml
+  kubectl apply -f operators/multiclusterobservability/manifests/base/grafana/deployment.yaml
+  kubectl apply -f ${ROOTDIR}/tests/run-in-kind/grafana # create the grafana-test svc, grafana-test config and datasource configmaps
+
+  if [[ -z "${IS_KIND_ENV}" ]]; then
+    # TODO(morvencao): remove the following two extra routes once accessing metrics from the grafana URL with a bearer token is supported
+    temp_route=$(mktemp -d /tmp/grafana.XXXXXXXXXX)
+    # install the grafana-test route
+    cat << EOF > ${temp_route}/grafana-test-route.yaml
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+  name: grafana-test
+spec:
+  host: grafana-test
+  wildcardPolicy: None
+  to:
+    kind: Service
+    name: grafana-test
+EOF
+
+    app_domain=$(kubectl -n openshift-ingress-operator get ingresscontrollers default -o jsonpath='{.status.domain}')
+    ${SED_COMMAND} "s~host: grafana-test$~host: grafana-test.${app_domain}~g" ${temp_route}/grafana-test-route.yaml
+    kubectl -n ${OBSERVABILITY_NS} apply -f ${temp_route}/grafana-test-route.yaml
+  fi
+}
+
+# deploy the MCO operator via the kustomize resources
+deploy_mco_operator() {
+  if [[ -n "${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF}" ]]; then
+    cd ${ROOTDIR}/operators/multiclusterobservability/config/manager && kustomize edit set image quay.io/stolostron/multicluster-observability-operator=${MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF}
+  else
+    cd ${ROOTDIR}/operators/multiclusterobservability/config/manager && kustomize edit set image quay.io/stolostron/multicluster-observability-operator="${IMAGE_REPO}/multicluster-observability-operator:${LATEST_SNAPSHOT}"
+  fi
+  cd ${ROOTDIR}
+  kustomize build ${ROOTDIR}/operators/multiclusterobservability/config/default | kubectl apply -n ${OCM_DEFAULT_NS} -f -
+
+  # wait until mco is ready
+  wait_for_deployment_ready 10 60s ${OCM_DEFAULT_NS} multicluster-observability-operator
+  echo "mco operator is deployed successfully."
+
+  kubectl create ns ${OBSERVABILITY_NS} || true
+}
+
+# wait for MCO CR readiness with a retry budget
+wait_for_observability_ready() {
+  echo "waiting for mco to be ready and running..."
+  retry_number=10
+  timeout=60s
+  for (( i = 1; i <= ${retry_number}; i++ )) ; do
+
+    if kubectl wait --timeout=${timeout} --for=condition=Ready mco/observability &> /dev/null; then
+      echo "Observability has started up and is running."
+      break
+    else
+      echo "mco is not ready yet, retrying in 10s..."
+      sleep 10
+      continue
+    fi
+    if [[ ${i} -eq ${retry_number} ]]; then
+      echo "timed out waiting for mco to be ready."
+      exit 1
+    fi
+  done
+}
+
+# wait until the given deployments are ready, with a retry budget
+wait_for_deployment_ready() {
+  if [[ -z "${1}" ]]; then
+    echo "retry number is empty, exiting..."
+  fi
+  retry_number=${1}
+  if [[ -z "${2}" ]]; then
+    echo "timeout is empty, exiting..."
+  fi
+  timeout=${2}
+  if [[ -z "${3}" ]]; then
+    echo "namespace is empty, exiting..."
+    exit 1
+  fi
+  ns=${3}
+  if [[ -z "${4}" ]]; then
+    echo "at least one deployment should be specified, exiting..."
+    exit 1
+  fi
+
+  echo "waiting for deployment(s) ${@:4} in namespace ${ns} to start up and run..."
+  for (( i = 1; i <= ${retry_number}; i++ )) ; do
+    if ! kubectl get ns ${ns} &> /dev/null; then
+      echo "namespace ${ns} is not created yet, retrying in 10s..."
+      sleep 10
+      continue
+    fi
+
+    if ! kubectl -n ${ns} get deploy ${@:4} &> /dev/null; then
+      echo "deployment(s) ${@:4} are not created yet, retrying in 10s..."
+      sleep 10
+      continue
+    fi
+
+    if kubectl -n ${ns} wait --timeout=${timeout} --for=condition=Available deploy ${@:4} &> /dev/null; then
+      echo "deployment(s) ${@:4} have started up and are running."
+      break
+    else
+      echo "deployment(s) ${@:4} are not ready yet, retrying in 10s..."
+      sleep 10
+      continue
+    fi
+    if [[ ${i} -eq ${retry_number} ]]; then
+      echo "timed out waiting for deployment(s) ${@:4} to be ready."
+      exit 1
+    fi
+  done
+}
+
+# function execute is the main routine that does the actual work
+execute() {
+  deploy_hub_spoke_core
+  approve_csr_joinrequest
+  deploy_mco_operator
+  deploy_grafana_test
+  echo "OCM and MCO are installed successfully."
+}
+
+# start executing the ACTION
+execute
diff --git a/cicd-scripts/update-check-mco-csv.sh b/cicd-scripts/update-check-mco-csv.sh
new file mode 100755
index 000000000..2b9cf2e63
--- /dev/null
+++ b/cicd-scripts/update-check-mco-csv.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+# Copyright (c) 2021 Red Hat, Inc.
+# Copyright Contributors to the Open Cluster Management project + +# generate csv +./cicd-scripts/install-dependencies.sh + +operator-sdk generate csv --crd-dir=deploy/crds --deploy-dir=deploy/ --output-dir=deploy/olm-catalog/multicluster-observability-operator --operator-name=multicluster-observability-operator --csv-version=0.1.0 +extra_text=" - name: observatoria.core.observatorium.io + version: v1alpha1 + kind: Observatorium + displayName: Observatorium + description: Observatorium is the Schema for the observatoria API + - name: observabilityaddons.observability.open-cluster-management.io + version: v1beta1 + kind: ObservabilityAddon + displayName: ObservabilityAddon + description: ObservabilityAddon is the Schema for the observabilityaddon API" +echo "$extra_text" > extra_text_tmp + +sed_command='sed -i-e -e' +if [[ "$(uname)" == "Darwin" ]]; then + sed_command='sed -i '-e' -e' +fi + +$sed_command 's/serviceAccountName: open-cluster-management:multicluster-observability-operator/serviceAccountName: multicluster-observability-operator/g' deploy/olm-catalog/multicluster-observability-operator/manifests/multicluster-observability-operator.clusterserviceversion.yaml +$sed_command '/version: v1beta1/r extra_text_tmp' deploy/olm-catalog/multicluster-observability-operator/manifests/multicluster-observability-operator.clusterserviceversion.yaml +rm -rf extra_text_tmp deploy/olm-catalog/multicluster-observability-operator/manifests/multicluster-observability-operator.clusterserviceversion.yaml-e + +# check if there is something needs to be committed +diff deploy/req_crds/core.observatorium.io_observatoria.yaml deploy/olm-catalog/multicluster-observability-operator/manifests/core.observatorium.io_observatoria.yaml +if [ $? -ne 0 ]; then + echo "Failed to check csv: should update observatorium CRD" + exit 1 +fi + +diff deploy/req_crds/observability.open-cluster-management.io_observabilityaddon_crd.yaml deploy/olm-catalog/multicluster-observability-operator/manifests/observability.open-cluster-management.io_observabilityaddon_crd.yaml +if [ $? -ne 0 ]; then + echo "Failed to check csv: should update observabilityaddon CRD" + exit 1 +fi + +diff deploy/crds/observability.open-cluster-management.io_multiclusterobservabilities_crd.yaml deploy/olm-catalog/multicluster-observability-operator/manifests/observability.open-cluster-management.io_multiclusterobservabilities_crd.yaml +if [ $? 
-ne 0 ]; then
+  echo "Failed to check csv: should update multiclusterobservability CRD"
+  exit 1
+fi
+
+if git diff --exit-code deploy/olm-catalog/multicluster-observability-operator/manifests/multicluster-observability-operator.clusterserviceversion.yaml; then
+  echo "Checked csv successfully"
+else
+  echo "Failed to check csv"
+  exit 1
+fi
diff --git a/collectors/metrics/Dockerfile b/collectors/metrics/Dockerfile
new file mode 100644
index 000000000..56593736f
--- /dev/null
+++ b/collectors/metrics/Dockerfile
@@ -0,0 +1,57 @@
+# Copyright Contributors to the Open Cluster Management project
+
+FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder
+
+WORKDIR /workspace
+COPY go.sum go.mod ./
+COPY ./collectors/metrics ./collectors/metrics
+COPY ./operators/multiclusterobservability/api ./operators/multiclusterobservability/api
+RUN CGO_ENABLED=0 go build -a -installsuffix cgo -v -i -o metrics-collector ./collectors/metrics/cmd/metrics-collector/main.go
+
+FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
+
+ARG VCS_REF
+ARG VCS_URL
+ARG IMAGE_NAME
+ARG IMAGE_DESCRIPTION
+ARG IMAGE_DISPLAY_NAME
+ARG IMAGE_NAME_ARCH
+ARG IMAGE_MAINTAINER
+ARG IMAGE_VENDOR
+ARG IMAGE_VERSION
+ARG IMAGE_RELEASE
+ARG IMAGE_SUMMARY
+ARG IMAGE_OPENSHIFT_TAGS
+
+LABEL org.label-schema.vendor="Red Hat" \
+      org.label-schema.name="$IMAGE_NAME_ARCH" \
+      org.label-schema.description="$IMAGE_DESCRIPTION" \
+      org.label-schema.vcs-ref=$VCS_REF \
+      org.label-schema.vcs-url=$VCS_URL \
+      org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
+      org.label-schema.schema-version="1.0" \
+      name="$IMAGE_NAME" \
+      maintainer="$IMAGE_MAINTAINER" \
+      vendor="$IMAGE_VENDOR" \
+      version="$IMAGE_VERSION" \
+      release="$IMAGE_RELEASE" \
+      description="$IMAGE_DESCRIPTION" \
+      summary="$IMAGE_SUMMARY" \
+      io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
+      io.k8s.description="$IMAGE_DESCRIPTION" \
+      io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"
+
+RUN microdnf update &&\
+    microdnf install ca-certificates vi --nodocs &&\
+    mkdir /licenses &&\
+    microdnf clean all
+
+COPY --from=builder /workspace/metrics-collector /usr/bin/
+
+# standalone required parameters
+ENV FROM_CA_FILE="/from/service-ca.crt"
+ENV INTERVAL="60s"
+ENV MATCH_FILE="/metrics/match-file"
+ENV LIMIT_BYTES=1073741824
+
+CMD ["/bin/bash", "-c", "/usr/bin/metrics-collector --from ${FROM} --from-ca-file ${FROM_CA_FILE} --from-token ${FROM_TOKEN} --to-upload ${TO_UPLOAD} --id ${TENANT_ID} --label cluster=${CLUSTER_NAME} --label clusterID=${CLUSTER_ID} --match-file ${MATCH_FILE} --interval ${INTERVAL} --limit-bytes=${LIMIT_BYTES}"]
diff --git a/collectors/metrics/OWNERS b/collectors/metrics/OWNERS
new file mode 100644
index 000000000..837c913bd
--- /dev/null
+++ b/collectors/metrics/OWNERS
@@ -0,0 +1,11 @@
+approvers:
+  - clyang82
+  - marcolan018
+  - haoqing0110
+reviewers:
+  - clyang82
+  - marcolan018
+  - bjoydeep
+  - songleo
+  - haoqing0110
+  - morvencao
diff --git a/collectors/metrics/README.md b/collectors/metrics/README.md
new file mode 100644
index 000000000..1a818261e
--- /dev/null
+++ b/collectors/metrics/README.md
@@ -0,0 +1,59 @@
+Metrics Collector
+-----------
+Metrics Collector implements a client that "scrapes", or collects, data from OpenShift Prometheus
+and pushes it via federation to a Thanos instance hosted by the Red Hat Advanced Cluster Management for Kubernetes
+hub cluster. This project is based on the [Telemeter project](https://github.com/openshift/telemeter).
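+
+For reference, a typical invocation of the collector binary looks roughly like the
+following sketch. The flag names come from `cmd/metrics-collector/main.go`; the URLs,
+token path, and label values below are illustrative placeholders, not required values.
+
+```
+./metrics-collector \
+  --from https://prometheus-k8s.openshift-monitoring.svc:9091 \
+  --from-ca-file /from/service-ca.crt \
+  --from-token-file /var/run/secrets/kubernetes.io/serviceaccount/token \
+  --to-upload https://observatorium-api.example.com/api/metrics/v1/default/api/v1/receive \
+  --match '{__name__="up"}' \
+  --label cluster=local-cluster \
+  --interval 4m30s \
+  --limit-bytes 1073741824
+```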
+
+
+Get started
+-----------
+To execute the unit test suite, run
+
+```
+make -f Makefile.prow test-unit
+```
+
+To build the docker image and push it to a docker repository, run
+
+```
+docker build -t {REPO}/metrics-collector:latest .
+docker push {REPO}/metrics-collector:latest
+```
+where {REPO} is the docker repository.
+
+
+Integration environment
+-----------
+Prerequisites:
+The [kind](https://kind.sigs.k8s.io/) and [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl) commands are required to set up an integration environment. To install them, run:
+```
+./test/integration/prereq.sh
+```
+If the image is pushed to a private repo that requires authentication, export the user/password for the docker repository before running setup.sh:
+```
+export DOCKER_USER=
+export DOCKER_PASS=
+```
+
+To launch a self-contained integration environment based on the image built above, run:
+
+```
+./test/integration/setup.sh {REPO}/metrics-collector:latest
+```
+
+The above command creates a Kind cluster, then deploys [prometheus](https://prometheus.io/) and [thanos](https://thanos.io/) into it. Finally, a metrics collector deployment is created, which scrapes metrics from prometheus and sends them to the thanos server.
+
+
+To check/operate on the environment, run:
+```
+kubectl --kubeconfig $HOME/.kube/kind-config-hub {COMMAND}
+```
+where {COMMAND} is the target kubectl command, e.g. to check the status of the deployed pods in the Kind cluster, run:
+```
+kubectl --kubeconfig $HOME/.kube/kind-config-hub get pods -n open-cluster-management-monitoring
+```
+
+To clean up the integration environment, run:
+```
+./test/integration/clean.sh
+```
\ No newline at end of file
diff --git a/collectors/metrics/cmd/metrics-collector/main.go b/collectors/metrics/cmd/metrics-collector/main.go
new file mode 100644
index 000000000..458c2dd1e
--- /dev/null
+++ b/collectors/metrics/cmd/metrics-collector/main.go
@@ -0,0 +1,400 @@
+// Copyright Contributors to the Open Cluster Management project
+
+package main
+
+import (
+    "context"
+    "fmt"
+    stdlog "log"
+    "net"
+    "net/http"
+    "net/url"
+    "os"
+    "os/signal"
+    "strings"
+    "syscall"
+    "time"
+
+    "github.com/go-kit/kit/log"
+    "github.com/go-kit/kit/log/level"
+    "github.com/oklog/run"
+    "github.com/prometheus/common/expfmt"
+    "github.com/spf13/cobra"
+    "k8s.io/apimachinery/pkg/util/uuid"
+
+    "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/forwarder"
+    collectorhttp "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/http"
+    "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger"
+    "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/metricfamily"
+)
+
+func main() {
+    opt := &Options{
+        Listen:     "localhost:9002",
+        LimitBytes: 200 * 1024,
+        Rules:      []string{`{__name__="up"}`},
+        Interval:   4*time.Minute + 30*time.Second,
+        WorkerNum:  1,
+    }
+    cmd := &cobra.Command{
+        Short:         "Federate Prometheus via push",
+        SilenceErrors: true,
+        SilenceUsage:  true,
+        RunE: func(cmd *cobra.Command, args []string) error {
+            return opt.Run()
+        },
+    }
+
+    cmd.Flags().Int64Var(&opt.WorkerNum, "worker-number", opt.WorkerNum, "The number of clients to run in the simulated environment.")
+    cmd.Flags().StringVar(&opt.Listen, "listen", opt.Listen, "A host:port to listen on for health and metrics.")
+    cmd.Flags().StringVar(&opt.From, "from", opt.From, "The Prometheus server to federate from.")
"A bearer token to use when authenticating to the source Prometheus server.") + cmd.Flags().StringVar(&opt.FromCAFile, "from-ca-file", opt.FromCAFile, "A file containing the CA certificate to use to verify the --from URL in addition to the system roots certificates.") + cmd.Flags().StringVar(&opt.FromTokenFile, "from-token-file", opt.FromTokenFile, "A file containing a bearer token to use when authenticating to the source Prometheus server.") + cmd.Flags().StringVar(&opt.ToUpload, "to-upload", opt.ToUpload, "A server endpoint to push metrics to.") + cmd.Flags().DurationVar(&opt.Interval, "interval", opt.Interval, "The interval between scrapes. Prometheus returns the last 5 minutes of metrics when invoking the federation endpoint.") + cmd.Flags().Int64Var(&opt.LimitBytes, "limit-bytes", opt.LimitBytes, "The maxiumum acceptable size of a response returned when scraping Prometheus.") + + // TODO: more complex input definition, such as a JSON struct + cmd.Flags().StringArrayVar(&opt.Rules, "match", opt.Rules, "Match rules to federate.") + cmd.Flags().StringArrayVar(&opt.RecordingRules, "recordingrule", opt.RecordingRules, "Define recording rule is to generate new metrics based on specified query expression.") + cmd.Flags().StringVar(&opt.RulesFile, "match-file", opt.RulesFile, "A file containing match rules to federate, one rule per line.") + + cmd.Flags().StringSliceVar(&opt.LabelFlag, "label", opt.LabelFlag, "Labels to add to each outgoing metric, in key=value form.") + cmd.Flags().StringSliceVar(&opt.RenameFlag, "rename", opt.RenameFlag, "Rename metrics before sending by specifying OLD=NEW name pairs.") + cmd.Flags().StringArrayVar(&opt.ElideLabels, "elide-label", opt.ElideLabels, "A list of labels to be elided from outgoing metrics. Default to elide label prometheus and prometheus_replica") + + cmd.Flags().StringSliceVar(&opt.AnonymizeLabels, "anonymize-labels", opt.AnonymizeLabels, "Anonymize the values of the provided values before sending them on.") + cmd.Flags().StringVar(&opt.AnonymizeSalt, "anonymize-salt", opt.AnonymizeSalt, "A secret and unguessable value used to anonymize the input data.") + cmd.Flags().StringVar(&opt.AnonymizeSaltFile, "anonymize-salt-file", opt.AnonymizeSaltFile, "A file containing a secret and unguessable value used to anonymize the input data.") + + cmd.Flags().BoolVarP(&opt.Verbose, "verbose", "v", opt.Verbose, "Show verbose output.") + + cmd.Flags().StringVar(&opt.LogLevel, "log-level", opt.LogLevel, "Log filtering level. 
e.g info, debug, warn, error") + + // deprecated opt + cmd.Flags().StringVar(&opt.Identifier, "id", opt.Identifier, "The unique identifier for metrics sent with this client.") + + //simulation test + cmd.Flags().StringVar(&opt.SimulatedTimeseriesFile, "simulated-timeseries-file", opt.SimulatedTimeseriesFile, "A file containing the sample of timeseries.") + + l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + lvl, err := cmd.Flags().GetString("log-level") + if err != nil { + logger.Log(l, logger.Error, "msg", "could not parse log-level.") + } + l = level.NewFilter(l, logger.LogLevelFromString(lvl)) + l = log.WithPrefix(l, "ts", log.DefaultTimestampUTC) + l = log.WithPrefix(l, "caller", log.DefaultCaller) + stdlog.SetOutput(log.NewStdlibAdapter(l)) + opt.Logger = l + logger.Log(l, logger.Info, "msg", "metrics collector initialized") + + if err := cmd.Execute(); err != nil { + logger.Log(l, logger.Error, "err", err) + os.Exit(1) + } +} + +type Options struct { + Listen string + LimitBytes int64 + Verbose bool + + From string + ToUpload string + FromCAFile string + FromToken string + FromTokenFile string + + RenameFlag []string + Renames map[string]string + + ElideLabels []string + + AnonymizeLabels []string + AnonymizeSalt string + AnonymizeSaltFile string + + Rules []string + RecordingRules []string + RulesFile string + + LabelFlag []string + Labels map[string]string + + Interval time.Duration + + LogLevel string + Logger log.Logger + + // deprecated + Identifier string + + // simulation file + SimulatedTimeseriesFile string + + // how many threads are running + // for production, it is always 1 + WorkerNum int64 +} + +func (o *Options) Run() error { + + var g run.Group + + err, cfg := initConfig(o) + if err != nil { + return err + } + + worker, err := forwarder.New(*cfg) + if err != nil { + return fmt.Errorf("failed to configure metrics collector: %v", err) + } + + logger.Log(o.Logger, logger.Info, "msg", "starting metrics collector", "from", o.From, "to", o.ToUpload, "listen", o.Listen) + + { + // Execute the worker's `Run` func. + ctx, cancel := context.WithCancel(context.Background()) + g.Add(func() error { + worker.Run(ctx) + return nil + }, func(error) { + cancel() + }) + } + + { + // Notify and reload on SIGHUP. + hup := make(chan os.Signal, 1) + signal.Notify(hup, syscall.SIGHUP) + cancel := make(chan struct{}) + g.Add(func() error { + for { + select { + case <-hup: + if err := worker.Reconfigure(*cfg); err != nil { + logger.Log(o.Logger, logger.Error, "msg", "failed to reload config", "err", err) + return err + } + case <-cancel: + return nil + } + } + }, func(error) { + close(cancel) + }) + } + + if len(o.Listen) > 0 { + handlers := http.NewServeMux() + collectorhttp.DebugRoutes(handlers) + collectorhttp.HealthRoutes(handlers) + collectorhttp.MetricRoutes(handlers) + collectorhttp.ReloadRoutes(handlers, func() error { + return worker.Reconfigure(*cfg) + }) + handlers.Handle("/federate", serveLastMetrics(o.Logger, worker)) + l, err := net.Listen("tcp", o.Listen) + if err != nil { + return fmt.Errorf("failed to listen: %v", err) + } + + { + // Run the HTTP server. 
+ g.Add(func() error { + if err := http.Serve(l, handlers); err != nil && err != http.ErrServerClosed { + logger.Log(o.Logger, logger.Error, "msg", "server exited unexpectedly", "err", err) + return err + } + return nil + }, func(error) { + err := l.Close() + if err != nil { + logger.Log(o.Logger, logger.Error, "msg", "failed to close listener", "err", err) + } + }) + } + } + + err = runMultiWorkers(o) + if err != nil { + return err + } + + return g.Run() +} + +func runMultiWorkers(o *Options) error { + for i := 1; i < int(o.WorkerNum); i++ { + opt := &Options{ + From: o.From, + ToUpload: o.ToUpload, + FromCAFile: o.FromCAFile, + FromTokenFile: o.FromTokenFile, + Rules: o.Rules, + RenameFlag: o.RenameFlag, + RecordingRules: o.RecordingRules, + Interval: o.Interval, + Labels: map[string]string{}, + SimulatedTimeseriesFile: o.SimulatedTimeseriesFile, + Logger: o.Logger, + } + for _, flag := range o.LabelFlag { + values := strings.SplitN(flag, "=", 2) + if len(values) != 2 { + return fmt.Errorf("--label must be of the form key=value: %s", flag) + } + if values[0] == "cluster" { + values[1] += "-" + fmt.Sprint(i) + } + if values[0] == "clusterID" { + values[1] = string(uuid.NewUUID()) + } + opt.Labels[values[0]] = values[1] + } + err, forwardCfg := initConfig(opt) + if err != nil { + return err + } + + forwardWorker, err := forwarder.New(*forwardCfg) + if err != nil { + return fmt.Errorf("failed to configure metrics collector: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + forwardWorker.Run(ctx) + cancel() + }() + + } + return nil +} + +func initConfig(o *Options) (error, *forwarder.Config) { + if len(o.From) == 0 { + return fmt.Errorf("you must specify a Prometheus server to federate from (e.g. http://localhost:9090)"), nil + } + + for _, flag := range o.LabelFlag { + values := strings.SplitN(flag, "=", 2) + if len(values) != 2 { + return fmt.Errorf("--label must be of the form key=value: %s", flag), nil + } + if o.Labels == nil { + o.Labels = make(map[string]string) + } + o.Labels[values[0]] = values[1] + } + + for _, flag := range o.RenameFlag { + if len(flag) == 0 { + continue + } + values := strings.SplitN(flag, "=", 2) + if len(values) != 2 { + return fmt.Errorf("--rename must be of the form OLD_NAME=NEW_NAME: %s", flag), nil + } + if o.Renames == nil { + o.Renames = make(map[string]string) + } + o.Renames[values[0]] = values[1] + } + + from, err := url.Parse(o.From) + if err != nil { + return fmt.Errorf("--from is not a valid URL: %v", err), nil + } + from.Path = strings.TrimRight(from.Path, "/") + if len(from.Path) == 0 { + from.Path = "/federate" + } + + var toUpload *url.URL + if len(o.ToUpload) > 0 { + toUpload, err = url.Parse(o.ToUpload) + if err != nil { + return fmt.Errorf("--to-upload is not a valid URL: %v", err), nil + } + } + + if toUpload == nil { + return fmt.Errorf("--to-upload must be specified"), nil + } + + var transformer metricfamily.MultiTransformer + + if len(o.Labels) > 0 { + transformer.WithFunc(func() metricfamily.Transformer { + return metricfamily.NewLabel(o.Labels, nil) + }) + } + + if len(o.Renames) > 0 { + transformer.WithFunc(func() metricfamily.Transformer { + return metricfamily.RenameMetrics{Names: o.Renames} + }) + } + + if len(o.ElideLabels) == 0 { + o.ElideLabels = []string{"prometheus", "prometheus_replica"} + } + transformer.WithFunc(func() metricfamily.Transformer { + return metricfamily.NewElide(o.ElideLabels...) 
+ }) + + transformer.WithFunc(func() metricfamily.Transformer { + return metricfamily.NewDropInvalidFederateSamples(time.Now().Add(-24 * time.Hour)) + }) + + transformer.With(metricfamily.TransformerFunc(metricfamily.PackMetrics)) + transformer.With(metricfamily.TransformerFunc(metricfamily.SortMetrics)) + + return nil, &forwarder.Config{ + From: from, + ToUpload: toUpload, + FromToken: o.FromToken, + FromTokenFile: o.FromTokenFile, + FromCAFile: o.FromCAFile, + + AnonymizeLabels: o.AnonymizeLabels, + AnonymizeSalt: o.AnonymizeSalt, + AnonymizeSaltFile: o.AnonymizeSaltFile, + Debug: o.Verbose, + Interval: o.Interval, + LimitBytes: o.LimitBytes, + Rules: o.Rules, + RecordingRules: o.RecordingRules, + RulesFile: o.RulesFile, + Transformer: transformer, + + Logger: o.Logger, + SimulatedTimeseriesFile: o.SimulatedTimeseriesFile, + } +} + +// serveLastMetrics retrieves the last set of metrics served +func serveLastMetrics(l log.Logger, worker *forwarder.Worker) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.Method != "GET" { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + families := worker.LastMetrics() + w.Header().Set("Content-Type", string(expfmt.FmtText)) + encoder := expfmt.NewEncoder(w, expfmt.FmtText) + for _, family := range families { + if family == nil { + continue + } + if err := encoder.Encode(family); err != nil { + logger.Log(l, logger.Error, "msg", "unable to write metrics for family", "err", err) + break + } + } + }) +} diff --git a/collectors/metrics/cmd/metrics-collector/main_test.go b/collectors/metrics/cmd/metrics-collector/main_test.go new file mode 100644 index 000000000..43b667bbb --- /dev/null +++ b/collectors/metrics/cmd/metrics-collector/main_test.go @@ -0,0 +1,50 @@ +package main + +import ( + stdlog "log" + "os" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" +) + +func init() { + os.Setenv("UNIT_TEST", "true") +} + +func TestMultiWorkers(t *testing.T) { + + opt := &Options{ + Listen: "localhost:9002", + LimitBytes: 200 * 1024, + Rules: []string{`{__name__="instance:node_vmstat_pgmajfault:rate1m"}`}, + Interval: 4*time.Minute + 30*time.Second, + WorkerNum: 2, + SimulatedTimeseriesFile: "../../testdata/timeseries.txt", + From: "https://prometheus-k8s.openshift-monitoring.svc:9091", + ToUpload: "https://prometheus-k8s.openshift-monitoring.svc:9091", + LabelFlag: []string{ + "cluster=local-cluster", + "clusterID=245c2253-7b0d-4080-8e33-f6f0d6c6ff73", + }, + FromCAFile: "../../testdata/service-ca.crt", + FromTokenFile: "../../testdata/token", + } + + l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + l = level.NewFilter(l, logger.LogLevelFromString("debug")) + l = log.WithPrefix(l, "ts", log.DefaultTimestampUTC) + l = log.WithPrefix(l, "caller", log.DefaultCaller) + stdlog.SetOutput(log.NewStdlibAdapter(l)) + opt.Logger = l + + err := runMultiWorkers(opt) + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + +} diff --git a/collectors/metrics/pkg/forwarder/forwarder.go b/collectors/metrics/pkg/forwarder/forwarder.go new file mode 100644 index 000000000..ddf1842c9 --- /dev/null +++ b/collectors/metrics/pkg/forwarder/forwarder.go @@ -0,0 +1,479 @@ +// Copyright Contributors to the Open Cluster Management project + +package forwarder + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + 
"net/url" + "os" + "strings" + "sync" + "time" + + "github.com/go-kit/kit/log" + + "github.com/prometheus/client_golang/prometheus" + clientmodel "github.com/prometheus/client_model/go" + + metricshttp "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/http" + rlogger "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/metricfamily" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/metricsclient" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/simulator" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/status" +) + +const ( + failedStatusReportMsg = "Failed to report status" +) + +var ( + gaugeFederateSamples = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "federate_samples", + Help: "Tracks the number of samples per federation", + }) + gaugeFederateFilteredSamples = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "federate_filtered_samples", + Help: "Tracks the number of samples filtered per federation", + }) + gaugeFederateErrors = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "federate_errors", + Help: "The number of times forwarding federated metrics has failed", + }) +) + +type RuleMatcher interface { + MatchRules() []string +} + +func init() { + prometheus.MustRegister( + gaugeFederateErrors, gaugeFederateSamples, gaugeFederateFilteredSamples, + ) +} + +// Config defines the parameters that can be used to configure a worker. +// The only required field is `From`. +type Config struct { + From *url.URL + ToUpload *url.URL + FromToken string + FromTokenFile string + FromCAFile string + + AnonymizeLabels []string + AnonymizeSalt string + AnonymizeSaltFile string + Debug bool + Interval time.Duration + LimitBytes int64 + Rules []string + RecordingRules []string + RulesFile string + Transformer metricfamily.Transformer + + Logger log.Logger + SimulatedTimeseriesFile string +} + +// Worker represents a metrics forwarding agent. It collects metrics from a source URL and forwards them to a sink. +// A Worker should be configured with a `Config` and instantiated with the `New` func. +// Workers are thread safe; all access to shared fields are synchronized. +type Worker struct { + fromClient *metricsclient.Client + toClient *metricsclient.Client + from *url.URL + to *url.URL + + interval time.Duration + transformer metricfamily.Transformer + rules []string + recordingRules []string + + lastMetrics []*clientmodel.MetricFamily + lock sync.Mutex + reconfigure chan struct{} + + logger log.Logger + + simulatedTimeseriesFile string + + status status.StatusReport +} + +func createClients(cfg Config, interval time.Duration, + logger log.Logger) (*metricsclient.Client, *metricsclient.Client, metricfamily.MultiTransformer, error) { + + var transformer metricfamily.MultiTransformer + + // Configure the anonymization. 
+ anonymizeSalt := cfg.AnonymizeSalt + if len(cfg.AnonymizeSalt) == 0 && len(cfg.AnonymizeSaltFile) > 0 { + data, err := ioutil.ReadFile(cfg.AnonymizeSaltFile) + if err != nil { + return nil, nil, transformer, fmt.Errorf("failed to read anonymize-salt-file: %v", err) + } + anonymizeSalt = strings.TrimSpace(string(data)) + } + if len(cfg.AnonymizeLabels) != 0 && len(anonymizeSalt) == 0 { + return nil, nil, transformer, fmt.Errorf("anonymize-salt must be specified if anonymize-labels is set") + } + if len(cfg.AnonymizeLabels) == 0 { + rlogger.Log(logger, rlogger.Warn, "msg", "not anonymizing any labels") + } + + // Configure a transformer. + if cfg.Transformer != nil { + transformer.With(cfg.Transformer) + } + if len(cfg.AnonymizeLabels) > 0 { + transformer.With(metricfamily.NewMetricsAnonymizer(anonymizeSalt, cfg.AnonymizeLabels, nil)) + } + + fromTransport := metricsclient.DefaultTransport(logger, false) + if len(cfg.FromCAFile) > 0 { + if fromTransport.TLSClientConfig == nil { + fromTransport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + } + pool, err := x509.SystemCertPool() + if err != nil { + return nil, nil, transformer, fmt.Errorf("failed to read system certificates: %v", err) + } + data, err := ioutil.ReadFile(cfg.FromCAFile) + if err != nil { + return nil, nil, transformer, fmt.Errorf("failed to read from-ca-file: %v", err) + } + if !pool.AppendCertsFromPEM(data) { + rlogger.Log(logger, rlogger.Warn, "msg", "no certs found in from-ca-file") + } + fromTransport.TLSClientConfig.RootCAs = pool + } else { + if fromTransport.TLSClientConfig == nil { + fromTransport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + /* #nosec */ + InsecureSkipVerify: true, + } + } + } + + // Create the `fromClient`. + fromClient := &http.Client{Transport: fromTransport} + if cfg.Debug { + fromClient.Transport = metricshttp.NewDebugRoundTripper(logger, fromClient.Transport) + } + if len(cfg.FromToken) == 0 && len(cfg.FromTokenFile) > 0 { + data, err := ioutil.ReadFile(cfg.FromTokenFile) + if err != nil { + return nil, nil, transformer, fmt.Errorf("unable to read from-token-file: %v", err) + } + cfg.FromToken = strings.TrimSpace(string(data)) + } + if len(cfg.FromToken) > 0 { + fromClient.Transport = metricshttp.NewBearerRoundTripper(cfg.FromToken, fromClient.Transport) + } + from := metricsclient.New(logger, fromClient, cfg.LimitBytes, interval, "federate_from") + + // Create the `toClient`. + + toTransport, err := metricsclient.MTLSTransport(logger) + if err != nil { + return nil, nil, transformer, errors.New(err.Error()) + } + toTransport.Proxy = http.ProxyFromEnvironment + toClient := &http.Client{Transport: toTransport} + if cfg.Debug { + toClient.Transport = metricshttp.NewDebugRoundTripper(logger, toClient.Transport) + } + to := metricsclient.New(logger, toClient, cfg.LimitBytes, interval, "federate_to") + return from, to, transformer, nil +} + +// New creates a new Worker based on the provided Config. If the Config contains invalid +// values, then an error is returned. 
+func New(cfg Config) (*Worker, error) { + if cfg.From == nil { + return nil, errors.New("a URL from which to scrape is required") + } + logger := log.With(cfg.Logger, "component", "forwarder") + rlogger.Log(logger, rlogger.Warn, "msg", cfg.ToUpload) + w := Worker{ + from: cfg.From, + interval: cfg.Interval, + reconfigure: make(chan struct{}), + to: cfg.ToUpload, + logger: log.With(cfg.Logger, "component", "forwarder/worker"), + simulatedTimeseriesFile: cfg.SimulatedTimeseriesFile, + } + + if w.interval == 0 { + w.interval = 4*time.Minute + 30*time.Second + } + + fromClient, toClient, transformer, err := createClients(cfg, w.interval, logger) + if err != nil { + return nil, err + } + w.fromClient = fromClient + w.toClient = toClient + w.transformer = transformer + + // Configure the matching rules. + rules := cfg.Rules + if len(cfg.RulesFile) > 0 { + data, err := ioutil.ReadFile(cfg.RulesFile) + if err != nil { + return nil, fmt.Errorf("unable to read match-file: %v", err) + } + rules = append(rules, strings.Split(string(data), "\n")...) + } + for i := 0; i < len(rules); { + s := strings.TrimSpace(rules[i]) + if len(s) == 0 { + rules = append(rules[:i], rules[i+1:]...) + continue + } + rules[i] = s + i++ + } + w.rules = rules + + // Configure the recording rules. + recordingRules := cfg.RecordingRules + for i := 0; i < len(recordingRules); { + s := strings.TrimSpace(recordingRules[i]) + if len(s) == 0 { + recordingRules = append(recordingRules[:i], recordingRules[i+1:]...) + continue + } + recordingRules[i] = s + i++ + } + w.recordingRules = recordingRules + + s, err := status.New(logger) + if err != nil { + return nil, fmt.Errorf("unable to create StatusReport: %v", err) + } + w.status = *s + + return &w, nil +} + +// Reconfigure temporarily stops a worker and reconfigures is with the provided Config. +// Is thread safe and can run concurrently with `LastMetrics` and `Run`. +func (w *Worker) Reconfigure(cfg Config) error { + worker, err := New(cfg) + if err != nil { + return fmt.Errorf("failed to reconfigure: %v", err) + } + + w.lock.Lock() + defer w.lock.Unlock() + + w.fromClient = worker.fromClient + w.toClient = worker.toClient + w.interval = worker.interval + w.from = worker.from + w.to = worker.to + w.transformer = worker.transformer + w.rules = worker.rules + w.recordingRules = worker.recordingRules + + // Signal a restart to Run func. + // Do this in a goroutine since we do not care if restarting the Run loop is asynchronous. + go func() { w.reconfigure <- struct{}{} }() + return nil +} + +func (w *Worker) LastMetrics() []*clientmodel.MetricFamily { + w.lock.Lock() + defer w.lock.Unlock() + return w.lastMetrics +} + +func (w *Worker) Run(ctx context.Context) { + for { + // Ensure that the Worker does not access critical configuration during a reconfiguration. + w.lock.Lock() + wait := w.interval + // The critical section ends here. + w.lock.Unlock() + + if err := w.forward(ctx); err != nil { + gaugeFederateErrors.Inc() + rlogger.Log(w.logger, rlogger.Error, "msg", "unable to forward results", "err", err) + wait = time.Minute + } + + select { + // If the context is cancelled, then we're done. + case <-ctx.Done(): + return + case <-time.After(wait): + // We want to be able to interrupt a sleep to immediately apply a new configuration. 
+ case <-w.reconfigure: + } + } +} + +func (w *Worker) forward(ctx context.Context) error { + w.lock.Lock() + defer w.lock.Unlock() + + var families []*clientmodel.MetricFamily + var err error + if w.simulatedTimeseriesFile != "" { + families, err = simulator.FetchSimulatedTimeseries(w.simulatedTimeseriesFile) + if err != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", "failed fetch simulated timeseries", "err", err) + } + } else if os.Getenv("SIMULATE") == "true" { + families = simulator.SimulateMetrics(w.logger) + } else { + families, err = w.getFederateMetrics(ctx) + if err != nil { + statusErr := w.status.UpdateStatus("Degraded", "Degraded", "Failed to retrieve metrics") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + return err + } + + rfamilies, err := w.getRecordingMetrics(ctx) + if err != nil { + statusErr := w.status.UpdateStatus("Degraded", "Degraded", "Failed to retrieve recording metrics") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + } else { + families = append(families, rfamilies...) + } + } + + before := metricfamily.MetricsCount(families) + if err := metricfamily.Filter(families, w.transformer); err != nil { + statusErr := w.status.UpdateStatus("Degraded", "Degraded", "Failed to filter metrics") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + return err + } + + families = metricfamily.Pack(families) + after := metricfamily.MetricsCount(families) + + gaugeFederateSamples.Set(float64(before)) + gaugeFederateFilteredSamples.Set(float64(before - after)) + + w.lastMetrics = families + + if len(families) == 0 { + rlogger.Log(w.logger, rlogger.Warn, "msg", "no metrics to send, doing nothing") + statusErr := w.status.UpdateStatus("Available", "Available", "No metrics to send") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + return nil + } + + if w.to == nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", "to is nil, doing nothing") + statusErr := w.status.UpdateStatus("Available", "Available", "Metrics is not required to send") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + return nil + } + + req := &http.Request{Method: "POST", URL: w.to} + err = w.toClient.RemoteWrite(ctx, req, families, w.interval) + if err != nil { + statusErr := w.status.UpdateStatus("Degraded", "Degraded", "Failed to send metrics") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + } else if w.simulatedTimeseriesFile == "" { + statusErr := w.status.UpdateStatus("Available", "Available", "Cluster metrics sent successfully") + if statusErr != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", failedStatusReportMsg, "err", statusErr) + } + } + + return err +} + +func (w *Worker) getFederateMetrics(ctx context.Context) ([]*clientmodel.MetricFamily, error) { + var families []*clientmodel.MetricFamily + var err error + + // reset query from last invocation, otherwise match rules will be appended + from := w.from + from.RawQuery = "" + v := from.Query() + for _, rule := range w.rules { + v.Add("match[]", rule) + } + from.RawQuery = v.Encode() + + req := &http.Request{Method: "GET", URL: from} + families, err = w.fromClient.Retrieve(ctx, req) + if err != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", "Failed 
to retrieve metrics", "err", err) + return families, err + } + + return families, nil +} + +func (w *Worker) getRecordingMetrics(ctx context.Context) ([]*clientmodel.MetricFamily, error) { + var families []*clientmodel.MetricFamily + var e error + + from := w.from + originPath := from.Path + from.Path = "/api/v1/query" + // Path /api/v1/query is only used in getRecordingMetrics(), reset to origin path before return. + defer func() { + w.from.Path = originPath + }() + + for _, rule := range w.recordingRules { + var r map[string]string + err := json.Unmarshal(([]byte)(rule), &r) + if err != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", "Input error", "err", err) + e = err + continue + } + rname := r["name"] + rquery := r["query"] + + // reset query from last invocation, otherwise match rules will be appended + from.RawQuery = "" + v := w.from.Query() + v.Add("query", rquery) + from.RawQuery = v.Encode() + + req := &http.Request{Method: "GET", URL: from} + rfamilies, err := w.fromClient.RetrievRecordingMetrics(ctx, req, rname) + if err != nil { + rlogger.Log(w.logger, rlogger.Warn, "msg", "Failed to retrieve recording metrics", "err", err) + e = err + continue + } else { + families = append(families, rfamilies...) + } + } + + return families, e +} diff --git a/collectors/metrics/pkg/forwarder/forwarder_test.go b/collectors/metrics/pkg/forwarder/forwarder_test.go new file mode 100644 index 000000000..ddbe579fd --- /dev/null +++ b/collectors/metrics/pkg/forwarder/forwarder_test.go @@ -0,0 +1,260 @@ +// Copyright Contributors to the Open Cluster Management project +package forwarder + +import ( + "context" + "net/http" + "net/http/httptest" + "net/url" + "os" + "sync" + "testing" + + "github.com/go-kit/kit/log" +) + +func init() { + os.Setenv("UNIT_TEST", "true") +} + +func TestNew(t *testing.T) { + from, err := url.Parse("https://redhat.com") + if err != nil { + t.Fatalf("failed to parse `from` URL: %v", err) + } + toUpload, err := url.Parse("https://k8s.io") + if err != nil { + t.Fatalf("failed to parse `toUpload` URL: %v", err) + } + + tc := []struct { + c Config + err bool + }{ + { + // Empty configuration should error. + c: Config{Logger: log.NewNopLogger()}, + err: true, + }, + { + // Only providing a `From` should not error. + c: Config{ + From: from, + Logger: log.NewNopLogger(), + }, + err: false, + }, + { + // Providing `From` and `ToUpload` should not error. + c: Config{ + From: from, + ToUpload: toUpload, + Logger: log.NewNopLogger(), + }, + err: false, + }, + { + // Providing an invalid `FromTokenFile` file should error. + c: Config{ + From: from, + FromTokenFile: "/this/path/does/not/exist", + Logger: log.NewNopLogger(), + }, + err: true, + }, + { + // Providing only `AnonymizeSalt` should not error. + c: Config{ + From: from, + AnonymizeSalt: "1", + Logger: log.NewNopLogger(), + }, + err: false, + }, + { + // Providing only `AnonymizeLabels` should error. + c: Config{ + From: from, + AnonymizeLabels: []string{"foo"}, + Logger: log.NewNopLogger(), + }, + err: true, + }, + { + // Providing only `AnonymizeSalt` and `AnonymizeLabels should not error. + c: Config{ + From: from, + AnonymizeLabels: []string{"foo"}, + AnonymizeSalt: "1", + Logger: log.NewNopLogger(), + }, + err: false, + }, + { + // Providing an invalid `AnonymizeSaltFile` should error. 
+ c: Config{ + From: from, + AnonymizeLabels: []string{"foo"}, + AnonymizeSaltFile: "/this/path/does/not/exist", + Logger: log.NewNopLogger(), + }, + err: true, + }, + { + // Providing `AnonymizeSalt` takes preference over an invalid `AnonymizeSaltFile` and should not error. + c: Config{ + From: from, + AnonymizeLabels: []string{"foo"}, + AnonymizeSalt: "1", + AnonymizeSaltFile: "/this/path/does/not/exist", + Logger: log.NewNopLogger(), + }, + err: false, + }, + { + // Providing an invalid `FromCAFile` should error. + c: Config{ + From: from, + FromCAFile: "/this/path/does/not/exist", + Logger: log.NewNopLogger(), + }, + err: true, + }, + } + + for i := range tc { + if _, err := New(tc[i].c); (err != nil) != tc[i].err { + no := "no" + if tc[i].err { + no = "an" + } + t.Errorf("test case %d: got '%v', expected %s error", i, err, no) + } + } +} + +func TestReconfigure(t *testing.T) { + from, err := url.Parse("https://redhat.com") + if err != nil { + t.Fatalf("failed to parse `from` URL: %v", err) + } + c := Config{ + From: from, + Logger: log.NewNopLogger(), + } + w, err := New(c) + if err != nil { + t.Fatalf("failed to create new worker: %v", err) + } + + from2, err := url.Parse("https://redhat.com") + if err != nil { + t.Fatalf("failed to parse `from2` URL: %v", err) + } + + tc := []struct { + c Config + err bool + }{ + { + // Empty configuration should error. + c: Config{Logger: log.NewNopLogger()}, + err: true, + }, + { + // Configuration with new `From` should not error. + c: Config{ + From: from2, + Logger: log.NewNopLogger(), + }, + err: false, + }, + { + // Configuration with new invalid field should error. + c: Config{ + From: from, + FromTokenFile: "/this/path/does/not/exist", + Logger: log.NewNopLogger(), + }, + err: true, + }, + } + + for i := range tc { + if err := w.Reconfigure(tc[i].c); (err != nil) != tc[i].err { + no := "no" + if tc[i].err { + no = "an" + } + t.Errorf("test case %d: got %q, expected %s error", i, err, no) + } + } +} + +// TestRun tests the Run method of the Worker type. +// This test will: +// * instantiate a worker +// * configure the worker to make requests against a test server +// * in that test server, reconfigure the worker to make requests against a second test server +// * in the second test server, cancel the worker's context. +// This test will only succeed if the worker is able to be correctly reconfigured and canceled +// such that the Run method returns. +func TestRun(t *testing.T) { + c := Config{ + // Use a dummy URL. + From: &url.URL{}, + Logger: log.NewNopLogger(), + } + w, err := New(c) + if err != nil { + t.Fatalf("failed to create new worker: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + var once sync.Once + var wg sync.WaitGroup + + wg.Add(1) + // This is the second test server. We need to define it early so we can use its URL in the + // handler for the first test server. + // In this handler, we decrement the wait group and cancel the worker's context. + ts2 := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + cancel() + once.Do(wg.Done) + })) + defer ts2.Close() + + // This is the first test server. + // In this handler, we test the Reconfigure method of the worker and point it to the second + // test server. 
+ ts1 := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { + go func() { + from, err := url.Parse(ts2.URL) + if err != nil { + t.Fatalf("failed to parse second test server URL: %v", err) + } + if err := w.Reconfigure(Config{From: from, Logger: log.NewNopLogger()}); err != nil { + t.Fatalf("failed to reconfigure worker with second test server url: %v", err) + } + }() + })) + defer ts1.Close() + + from, err := url.Parse(ts1.URL) + if err != nil { + t.Fatalf("failed to parse first test server URL: %v", err) + } + if err := w.Reconfigure(Config{From: from, Logger: log.NewNopLogger()}); err != nil { + t.Fatalf("failed to reconfigure worker with first test server url: %v", err) + } + + wg.Add(1) + // In this goroutine we run the worker and only decrement + // the wait group when the worker finishes running. + go func() { + w.Run(ctx) + wg.Done() + }() + + wg.Wait() +} diff --git a/collectors/metrics/pkg/http/client.go b/collectors/metrics/pkg/http/client.go new file mode 100644 index 000000000..634929026 --- /dev/null +++ b/collectors/metrics/pkg/http/client.go @@ -0,0 +1,101 @@ +package http + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + inFlightGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }, + []string{"client"}, + ) + + counter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method", "client"}, + ) + + dnsLatencyVec = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event", "client"}, + ) + + tlsLatencyVec = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event", "client"}, + ) + + histVec = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{"method", "client"}, + ) +) + +func init() { + prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) +} + +func NewInstrumentedRoundTripper(clientName string, next http.RoundTripper) http.RoundTripper { + trace := &promhttp.InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec. + WithLabelValues("dns_start", clientName). + Observe(t) + }, + DNSDone: func(t float64) { + dnsLatencyVec. + WithLabelValues("dns_done", clientName). + Observe(t) + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec. + WithLabelValues("tls_handshake_start", clientName). + Observe(t) + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec. + WithLabelValues("tls_handshake_done", clientName). 
+ Observe(t) + }, + } + + inFlightGauge := inFlightGauge.WithLabelValues(clientName) + + counter := counter.MustCurryWith(prometheus.Labels{ + "client": clientName, + }) + + histVec := histVec.MustCurryWith(prometheus.Labels{ + "client": clientName, + }) + + return promhttp.InstrumentRoundTripperInFlight(inFlightGauge, + promhttp.InstrumentRoundTripperCounter(counter, + promhttp.InstrumentRoundTripperTrace(trace, + promhttp.InstrumentRoundTripperDuration(histVec, next), + ), + ), + ) +} diff --git a/collectors/metrics/pkg/http/roundtripper.go b/collectors/metrics/pkg/http/roundtripper.go new file mode 100644 index 000000000..ae05afc03 --- /dev/null +++ b/collectors/metrics/pkg/http/roundtripper.go @@ -0,0 +1,81 @@ +package http + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "unicode/utf8" + + "github.com/go-kit/kit/log" + + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" +) + +type bearerRoundTripper struct { + token string + wrapper http.RoundTripper +} + +func NewBearerRoundTripper(token string, rt http.RoundTripper) http.RoundTripper { + return &bearerRoundTripper{token: token, wrapper: rt} +} + +func (rt *bearerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", rt.token)) + return rt.wrapper.RoundTrip(req) +} + +type debugRoundTripper struct { + next http.RoundTripper + logger log.Logger +} + +func NewDebugRoundTripper(logger log.Logger, next http.RoundTripper) *debugRoundTripper { + return &debugRoundTripper{next, log.With(logger, "component", "http/debugroundtripper")} +} + +func (rt *debugRoundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) { + reqd, _ := httputil.DumpRequest(req, false) + reqBody := bodyToString(&req.Body) + + res, err = rt.next.RoundTrip(req) + if err != nil { + logger.Log(rt.logger, logger.Error, "err", err) + return + } + + resd, _ := httputil.DumpResponse(res, false) + resBody := bodyToString(&res.Body) + + logger.Log(rt.logger, logger.Debug, "msg", "round trip", "url", req.URL, + "requestdump", string(reqd), "requestbody", reqBody, + "responsedump", string(resd), "responsebody", resBody) + return +} + +func bodyToString(body *io.ReadCloser) string { + if *body == nil { + return "" + } + + var b bytes.Buffer + _, err := b.ReadFrom(*body) + if err != nil { + panic(err) + } + if err = (*body).Close(); err != nil { + panic(err) + } + *body = ioutil.NopCloser(&b) + + s := b.String() + if utf8.ValidString(s) { + return s + } + + return hex.Dump(b.Bytes()) +} diff --git a/collectors/metrics/pkg/http/routes.go b/collectors/metrics/pkg/http/routes.go new file mode 100644 index 000000000..85f0cf0e7 --- /dev/null +++ b/collectors/metrics/pkg/http/routes.go @@ -0,0 +1,50 @@ +package http + +import ( + "fmt" + "net/http" + "net/http/pprof" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// DebugRoutes adds the debug handlers to a mux. +func DebugRoutes(mux *http.ServeMux) *http.ServeMux { + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.Handle("/debug/pprof/block", pprof.Handler("block")) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + return mux +} + +// HealthRoutes adds the health checks to a mux. 
+func HealthRoutes(mux *http.ServeMux) *http.ServeMux { + mux.HandleFunc("/healthz", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintln(w, "ok") }) + mux.HandleFunc("/healthz/ready", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintln(w, "ok") }) + return mux +} + +// MetricRoutes adds the metrics endpoint to a mux. +func MetricRoutes(mux *http.ServeMux) *http.ServeMux { + mux.Handle("/metrics", promhttp.Handler()) + return mux +} + +// ReloadRoutes adds the reload endpoint to a mux. +func ReloadRoutes(mux *http.ServeMux, reload func() error) *http.ServeMux { + mux.HandleFunc("/-/reload", func(w http.ResponseWriter, req *http.Request) { + if req.Method != http.MethodPost { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + if err := reload(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + }) + return mux +} diff --git a/collectors/metrics/pkg/logger/logger.go b/collectors/metrics/pkg/logger/logger.go new file mode 100644 index 000000000..633cbc841 --- /dev/null +++ b/collectors/metrics/pkg/logger/logger.go @@ -0,0 +1,60 @@ +package logger + +import ( + "fmt" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +type LogLevel string + +const ( + Debug LogLevel = "debug" + Info LogLevel = "info" + Warn LogLevel = "warn" + Error LogLevel = "error" +) + +// LogLevelFromString determines log level to string, defaults to all, +func LogLevelFromString(l string) level.Option { + switch l { + case "debug": + return level.AllowDebug() + case "info": + return level.AllowInfo() + case "warn": + return level.AllowWarn() + case "error": + return level.AllowError() + default: + return level.AllowAll() + } +} + +// Log is used to handle the error of logger.Log globally +func Log(log log.Logger, l LogLevel, keyvals ...interface{}) { + errkey := "failover_err_%d" + switch l { + case Debug: + err := level.Debug(log).Log(keyvals...) + if err != nil { + fmt.Sprintf(errkey, err) + } + case Info: + err := level.Info(log).Log(keyvals...) + if err != nil { + fmt.Sprintf(errkey, err) + } + case Warn: + err := level.Warn(log).Log(keyvals...) + if err != nil { + fmt.Sprintf(errkey, err) + } + case Error: + err := level.Error(log).Log(keyvals...) + if err != nil { + fmt.Sprintf(errkey, err) + } + } +} diff --git a/collectors/metrics/pkg/metricfamily/anonymize.go b/collectors/metrics/pkg/metricfamily/anonymize.go new file mode 100644 index 000000000..a9e5f5ab5 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/anonymize.go @@ -0,0 +1,82 @@ +package metricfamily + +import ( + "crypto/sha256" + "encoding/base64" + + clientmodel "github.com/prometheus/client_model/go" +) + +type AnonymizeMetrics struct { + salt string + global map[string]struct{} + byMetric map[string]map[string]struct{} +} + +// NewMetricsAnonymizer hashes label values on the incoming metrics using a cryptographic hash. +// Because the cardinality of most label values is low, only a portion of the hash is returned. +// To prevent rainbow tables from being used to recover the label value, each client should use +// a salt value. Because label values are expected to remain stable over many sessions, the salt +// must also be stable over the same time period. The salt should not be shared with the remote +// agent. This type is not thread-safe. 
+func NewMetricsAnonymizer(salt string, labels []string, metricsLabels map[string][]string) *AnonymizeMetrics { + global := make(map[string]struct{}) + for _, label := range labels { + global[label] = struct{}{} + } + byMetric := make(map[string]map[string]struct{}) + for name, labels := range metricsLabels { + l := make(map[string]struct{}) + for _, label := range labels { + l[label] = struct{}{} + } + byMetric[name] = l + } + return &AnonymizeMetrics{ + salt: salt, + global: global, + byMetric: byMetric, + } +} + +func (a *AnonymizeMetrics) Transform(family *clientmodel.MetricFamily) (bool, error) { + if family == nil { + return false, nil + } + if set, ok := a.byMetric[family.GetName()]; ok { + transformMetricLabelValues(a.salt, family.Metric, a.global, set) + } else { + transformMetricLabelValues(a.salt, family.Metric, a.global) + } + return true, nil +} + +func transformMetricLabelValues(salt string, metrics []*clientmodel.Metric, sets ...map[string]struct{}) { + for _, m := range metrics { + if m == nil { + continue + } + for _, pair := range m.Label { + if pair.Value == nil || *pair.Value == "" { + continue + } + name := pair.GetName() + for _, set := range sets { + _, ok := set[name] + if !ok { + continue + } + v := secureValueHash(salt, pair.GetValue()) + pair.Value = &v + break + } + } + } +} + +// secureValueHash hashes the input value for moderately low cardinality (< 1 million unique inputs) +// and converts it to a base64 string suitable for use as a label value in Prometheus. +func secureValueHash(salt, value string) string { + hash := sha256.Sum256([]byte(salt + value)) + return base64.RawURLEncoding.EncodeToString(hash[:9]) +} diff --git a/collectors/metrics/pkg/metricfamily/count.go b/collectors/metrics/pkg/metricfamily/count.go new file mode 100644 index 000000000..9e04a9ff5 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/count.go @@ -0,0 +1,16 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +type Count struct { + families int + metrics int +} + +func (t *Count) Metrics() int { return t.metrics } + +func (t *Count) Transform(family *clientmodel.MetricFamily) (bool, error) { + t.families++ + t.metrics += len(family.Metric) + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/drop_timestamp.go b/collectors/metrics/pkg/metricfamily/drop_timestamp.go new file mode 100644 index 000000000..c1b76d242 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/drop_timestamp.go @@ -0,0 +1,19 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +// DropTimestamp is a transformer that removes timestamps from metrics. +func DropTimestamp(family *clientmodel.MetricFamily) (bool, error) { + if family == nil { + return true, nil + } + + for _, m := range family.Metric { + if m == nil { + continue + } + m.TimestampMs = nil + } + + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/drop_timestamp_test.go b/collectors/metrics/pkg/metricfamily/drop_timestamp_test.go new file mode 100644 index 000000000..ecc064b97 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/drop_timestamp_test.go @@ -0,0 +1,113 @@ +package metricfamily + +import ( + "fmt" + "testing" + + clientmodel "github.com/prometheus/client_model/go" +) + +func TestDropTimestamp(t *testing.T) { + + family := func(name string, metrics ...*clientmodel.Metric) *clientmodel.MetricFamily { + families := &clientmodel.MetricFamily{Name: &name} + families.Metric = append(families.Metric, metrics...) 
+ return families + } + + metric := func(timestamp *int64) *clientmodel.Metric { return &clientmodel.Metric{TimestampMs: timestamp} } + + timestamp := func(timestamp int64) *int64 { return ×tamp } + + type checkFunc func(family *clientmodel.MetricFamily, ok bool, err error) error + + isOK := func(want bool) checkFunc { + return func(_ *clientmodel.MetricFamily, got bool, _ error) error { + if want != got { + return fmt.Errorf("want ok %t, got %t", want, got) + } + return nil + } + } + + hasErr := func(want error) checkFunc { + return func(_ *clientmodel.MetricFamily, _ bool, got error) error { + if want != got { + return fmt.Errorf("want err %v, got %v", want, got) + } + return nil + } + } + + hasMetrics := func(want int) checkFunc { + return func(m *clientmodel.MetricFamily, _ bool, _ error) error { + if got := len(m.Metric); want != got { + return fmt.Errorf("want len(m.Metric)=%v, got %v", want, got) + } + return nil + } + } + + metricsHaveTimestamps := func(want bool) checkFunc { + return func(m *clientmodel.MetricFamily, _ bool, _ error) error { + for _, metric := range m.Metric { + if got := metric.TimestampMs != nil; want != got { + return fmt.Errorf("want metrics to have timestamp %t, got %t", want, got) + } + } + return nil + } + } + + for _, tc := range []struct { + family *clientmodel.MetricFamily + name string + checks []checkFunc + }{ + { + name: "nil family", + checks: []checkFunc{ + isOK(true), + hasErr(nil), + }, + }, + { + name: "family without timestamp", + family: family("foo"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + }, + }, + { + name: "family without timestamp", + family: family("foo", metric(nil)), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetrics(1), + metricsHaveTimestamps(false), + }, + }, + { + name: "family with timestamp", + family: family("foo", metric(nil), metric(timestamp(1))), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetrics(2), + metricsHaveTimestamps(false), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ok, err := DropTimestamp(tc.family) + + for _, check := range tc.checks { + if err := check(tc.family, ok, err); err != nil { + t.Error(err) + } + } + }) + } +} diff --git a/collectors/metrics/pkg/metricfamily/drop_unsorted.go b/collectors/metrics/pkg/metricfamily/drop_unsorted.go new file mode 100644 index 000000000..67cbc88dd --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/drop_unsorted.go @@ -0,0 +1,26 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +type DropUnsorted struct { + timestamp int64 +} + +func (o *DropUnsorted) Transform(family *clientmodel.MetricFamily) (bool, error) { + for i, m := range family.Metric { + if m == nil { + continue + } + var ts int64 + if m.TimestampMs != nil { + ts = *m.TimestampMs + } + if ts < o.timestamp { + family.Metric[i] = nil + continue + } + o.timestamp = ts + } + o.timestamp = 0 + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/elide.go b/collectors/metrics/pkg/metricfamily/elide.go new file mode 100644 index 000000000..c8e7f83b3 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/elide.go @@ -0,0 +1,40 @@ +package metricfamily + +import ( + prom "github.com/prometheus/client_model/go" +) + +type elide struct { + labelSet map[string]struct{} +} + +// NewElide creates a new elide transformer for the given metrics. 
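+// Illustrative sketch (label name assumed, not part of this change):
+// NewElide("prometheus_replica") yields a transformer whose Transform drops
+// the "prometheus_replica" label pair from every metric in a family while
+// leaving the samples themselves untouched.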
+func NewElide(labels ...string) *elide { + labelSet := make(map[string]struct{}) + for i := range labels { + labelSet[labels[i]] = struct{}{} + } + + return &elide{labelSet} +} + +// Transform filters label pairs in the given metrics family, +// eliding labels. +func (t *elide) Transform(family *prom.MetricFamily) (bool, error) { + if family == nil || len(family.Metric) == 0 { + return true, nil + } + + for i := range family.Metric { + var filtered []*prom.LabelPair + for j := range family.Metric[i].Label { + if _, elide := t.labelSet[family.Metric[i].Label[j].GetName()]; elide { + continue + } + filtered = append(filtered, family.Metric[i].Label[j]) + } + family.Metric[i].Label = filtered + } + + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/elide_test.go b/collectors/metrics/pkg/metricfamily/elide_test.go new file mode 100644 index 000000000..236caec40 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/elide_test.go @@ -0,0 +1,216 @@ +package metricfamily + +import ( + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + clientmodel "github.com/prometheus/client_model/go" +) + +func TestElide(t *testing.T) { + family := func(metrics ...*clientmodel.Metric) *clientmodel.MetricFamily { + families := &clientmodel.MetricFamily{Name: proto.String("test")} + families.Metric = append(families.Metric, metrics...) + return families + } + + type checkFunc func(family *clientmodel.MetricFamily, ok bool, err error) error + + isOK := func(want bool) checkFunc { + return func(_ *clientmodel.MetricFamily, got bool, _ error) error { + if want != got { + return fmt.Errorf("want ok %t, got %t", want, got) + } + return nil + } + } + + hasErr := func(want error) checkFunc { + return func(_ *clientmodel.MetricFamily, _ bool, got error) error { + if want != got { + return fmt.Errorf("want err %v, got %v", want, got) + } + return nil + } + } + + metricIsNil := func(want bool) checkFunc { + return func(m *clientmodel.MetricFamily, _ bool, _ error) error { + if got := m == nil; want != got { + return fmt.Errorf("want metric to be nil=%t, got %t", want, got) + } + return nil + } + } + + hasMetricCount := func(want int) checkFunc { + return func(m *clientmodel.MetricFamily, _ bool, _ error) error { + if got := len(m.Metric); want != got { + return fmt.Errorf("want len(m.Metric)=%v, got %v", want, got) + } + return nil + } + } + + hasLabelCount := func(want ...int) checkFunc { + return func(family *clientmodel.MetricFamily, _ bool, _ error) error { + for i := range family.Metric { + if got := len(family.Metric[i].Label); got != want[i] { + return fmt.Errorf( + "want len(m.Metric[%v].Label)=%v, got %v", + i, want[i], got) + } + } + return nil + } + } + + hasLabels := func(want bool, labels ...string) checkFunc { + return func(family *clientmodel.MetricFamily, _ bool, _ error) error { + labelSet := make(map[string]struct{}) + for i := range family.Metric { + for j := range family.Metric[i].Label { + labelSet[family.Metric[i].Label[j].GetName()] = struct{}{} + } + } + + for _, label := range labels { + if _, got := labelSet[label]; want != got { + wants := "present" + if !want { + wants = "not present" + } + + gots := "is" + if !got { + gots = "isn't" + } + + return fmt.Errorf( + "want label %q be %s in metrics, but it %s", + label, wants, gots, + ) + } + } + + return nil + } + } + + metricWithLabels := func(labels ...string) *clientmodel.Metric { + var labelPairs []*clientmodel.LabelPair + for _, l := range labels { + labelPairs = append(labelPairs, &clientmodel.LabelPair{Name: 
proto.String(l)}) + } + return &clientmodel.Metric{Label: labelPairs} + } + + for _, tc := range []struct { + family *clientmodel.MetricFamily + elide *elide + name string + checks []checkFunc + }{ + { + name: "nil family", + family: nil, + elide: NewElide("elide"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + metricIsNil(true), + }, + }, + { + name: "empty family", + family: family(), + elide: NewElide("elide"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetricCount(0), + }, + }, + { + name: "one elide one retain", + family: family(metricWithLabels("retain", "elide")), + elide: NewElide("elide"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetricCount(1), + hasLabelCount(1), + hasLabels(false, "elide"), + hasLabels(true, "retain"), + }, + }, + { + name: "no match", + family: family(metricWithLabels("retain")), + elide: NewElide("elide"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetricCount(1), + hasLabelCount(1), + hasLabels(false, "elide"), + hasLabels(true, "retain"), + }, + }, + { + name: "single match", + family: family(metricWithLabels("elide")), + elide: NewElide("elide"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetricCount(1), + hasLabelCount(0), + hasLabels(false, "elide"), + }, + }, + { + name: "multiple retains, multiple elides", + family: family( + metricWithLabels("elide1", "elide2", "retain1", "retain2"), + ), + elide: NewElide("elide1", "elide2"), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetricCount(1), + hasLabelCount(2), + hasLabels(false, "elide1"), + hasLabels(false, "elide2"), + hasLabels(true, "retain1"), + hasLabels(true, "retain2"), + }, + }, + { + name: "empty elider", + family: family( + metricWithLabels("retain1", "retain2"), + ), + elide: NewElide(), + checks: []checkFunc{ + isOK(true), + hasErr(nil), + hasMetricCount(1), + hasLabelCount(2), + hasLabels(true, "retain1"), + hasLabels(true, "retain2"), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ok, err := tc.elide.Transform(tc.family) + + for _, check := range tc.checks { + if err := check(tc.family, ok, err); err != nil { + t.Error(err) + } + } + }) + } +} diff --git a/collectors/metrics/pkg/metricfamily/empty.go b/collectors/metrics/pkg/metricfamily/empty.go new file mode 100644 index 000000000..8879e293c --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/empty.go @@ -0,0 +1,12 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +func DropEmptyFamilies(family *clientmodel.MetricFamily) (bool, error) { + for _, m := range family.Metric { + if m != nil { + return true, nil + } + } + return false, nil +} diff --git a/collectors/metrics/pkg/metricfamily/expired.go b/collectors/metrics/pkg/metricfamily/expired.go new file mode 100644 index 000000000..62ba50ec7 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/expired.go @@ -0,0 +1,30 @@ +package metricfamily + +import ( + "time" + + clientmodel "github.com/prometheus/client_model/go" +) + +type dropExpiredSamples struct { + min int64 +} + +func NewDropExpiredSamples(min time.Time) Transformer { + return &dropExpiredSamples{ + min: min.Unix() * 1000, + } +} + +func (t *dropExpiredSamples) Transform(family *clientmodel.MetricFamily) (bool, error) { + for i, m := range family.Metric { + if m == nil { + continue + } + if ts := m.GetTimestampMs(); ts < t.min { + family.Metric[i] = nil + continue + } + } + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/invalid.go 
b/collectors/metrics/pkg/metricfamily/invalid.go new file mode 100644 index 000000000..99d2c2f79 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/invalid.go @@ -0,0 +1,192 @@ +package metricfamily + +import ( + "fmt" + "time" + + clientmodel "github.com/prometheus/client_model/go" +) + +type errorInvalidFederateSamples struct { + min int64 +} + +func NewErrorInvalidFederateSamples(min time.Time) Transformer { + return &errorInvalidFederateSamples{ + min: min.Unix() * 1000, + } +} + +func (t *errorInvalidFederateSamples) Transform(family *clientmodel.MetricFamily) (bool, error) { + name := family.GetName() + if len(name) == 0 { + return false, nil + } + if len(name) > 255 { + return false, fmt.Errorf("metrics_name cannot be longer than 255 characters") + } + if family.Type == nil { + return false, nil + } + switch t := *family.Type; t { + case clientmodel.MetricType_COUNTER: + case clientmodel.MetricType_GAUGE: + case clientmodel.MetricType_HISTOGRAM: + case clientmodel.MetricType_SUMMARY: + case clientmodel.MetricType_UNTYPED: + default: + return false, fmt.Errorf("unknown metric type %s", t) + } + + for _, m := range family.Metric { + if m == nil { + continue + } + for _, label := range m.Label { + if label.Name == nil || len(*label.Name) == 0 || len(*label.Name) > 255 { + return false, fmt.Errorf("label_name cannot be longer than 255 characters") + } + if label.Value == nil || len(*label.Value) > 255 { + return false, fmt.Errorf("label_value cannot be longer than 255 characters") + } + } + if m.TimestampMs == nil { + return false, ErrNoTimestamp + } + if *m.TimestampMs < t.min { + return false, ErrTimestampTooOld + } + switch t := *family.Type; t { + case clientmodel.MetricType_COUNTER: + if m.Counter == nil || m.Gauge != nil || m.Histogram != nil || m.Summary != nil || m.Untyped != nil { + return false, fmt.Errorf("metric type %s must have counter field set", t) + } + case clientmodel.MetricType_GAUGE: + if m.Counter != nil || m.Gauge == nil || m.Histogram != nil || m.Summary != nil || m.Untyped != nil { + return false, fmt.Errorf("metric type %s must have gauge field set", t) + } + case clientmodel.MetricType_HISTOGRAM: + if m.Counter != nil || m.Gauge != nil || m.Histogram == nil || m.Summary != nil || m.Untyped != nil { + return false, fmt.Errorf("metric type %s must have histogram field set", t) + } + case clientmodel.MetricType_SUMMARY: + if m.Counter != nil || m.Gauge != nil || m.Histogram != nil || m.Summary == nil || m.Untyped != nil { + return false, fmt.Errorf("metric type %s must have summary field set", t) + } + case clientmodel.MetricType_UNTYPED: + if m.Counter != nil || m.Gauge != nil || m.Histogram != nil || m.Summary != nil || m.Untyped == nil { + return false, fmt.Errorf("metric type %s must have untyped field set", t) + } + } + } + return true, nil +} + +type dropInvalidFederateSamples struct { + min int64 +} + +func NewDropInvalidFederateSamples(min time.Time) Transformer { + return &dropInvalidFederateSamples{ + min: min.Unix() * 1000, + } +} + +func (t *dropInvalidFederateSamples) Transform(family *clientmodel.MetricFamily) (bool, error) { + name := family.GetName() + if len(name) == 0 { + return false, nil + } + if len(name) > 255 { + return false, nil + } + if family.Type == nil { + return false, nil + } + switch t := *family.Type; t { + case clientmodel.MetricType_COUNTER: + case clientmodel.MetricType_GAUGE: + case clientmodel.MetricType_HISTOGRAM: + case clientmodel.MetricType_SUMMARY: + case clientmodel.MetricType_UNTYPED: + default: + return false, 
nil + } + + for i, m := range family.Metric { + if m == nil { + continue + } + packLabels := false + for j, label := range m.Label { + if label.Name == nil || len(*label.Name) == 0 || len(*label.Name) > 255 { + m.Label[j] = nil + packLabels = true + } + if label.Value == nil || len(*label.Value) > 255 { + m.Label[j] = nil + packLabels = true + } + } + if packLabels { + m.Label = PackLabels(m.Label) + } + if m.TimestampMs == nil || *m.TimestampMs < t.min { + family.Metric[i] = nil + continue + } + switch t := *family.Type; t { + case clientmodel.MetricType_COUNTER: + if m.Counter == nil || m.Gauge != nil || m.Histogram != nil || m.Summary != nil || m.Untyped != nil { + family.Metric[i] = nil + } + case clientmodel.MetricType_GAUGE: + if m.Counter != nil || m.Gauge == nil || m.Histogram != nil || m.Summary != nil || m.Untyped != nil { + family.Metric[i] = nil + } + case clientmodel.MetricType_HISTOGRAM: + if m.Counter != nil || m.Gauge != nil || m.Histogram == nil || m.Summary != nil || m.Untyped != nil { + family.Metric[i] = nil + } + case clientmodel.MetricType_SUMMARY: + if m.Counter != nil || m.Gauge != nil || m.Histogram != nil || m.Summary == nil || m.Untyped != nil { + family.Metric[i] = nil + } + case clientmodel.MetricType_UNTYPED: + if m.Counter != nil || m.Gauge != nil || m.Histogram != nil || m.Summary != nil || m.Untyped == nil { + family.Metric[i] = nil + } + } + } + return true, nil +} + +// PackLabels fills holes in the label slice by shifting items towards the zero index. +// It will modify the slice in place. +func PackLabels(labels []*clientmodel.LabelPair) []*clientmodel.LabelPair { + j := len(labels) + next := 0 +Found: + for i := 0; i < j; i++ { + if labels[i] != nil { + continue + } + // scan for the next non-nil metric + if next <= i { + next = i + 1 + } + for k := next; k < j; k++ { + if labels[k] == nil { + continue + } + // fill the current i with a non-nil metric + labels[i], labels[k] = labels[k], nil + next = k + 1 + continue Found + } + // no more valid metrics + labels = labels[:i] + break + } + return labels +} diff --git a/collectors/metrics/pkg/metricfamily/label.go b/collectors/metrics/pkg/metricfamily/label.go new file mode 100644 index 000000000..c39bf9787 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/label.go @@ -0,0 +1,76 @@ +package metricfamily + +import ( + "sync" + + clientmodel "github.com/prometheus/client_model/go" +) + +type LabelRetriever interface { + Labels() (map[string]string, error) +} + +type label struct { + labels map[string]*clientmodel.LabelPair + retriever LabelRetriever + mu sync.Mutex +} + +func NewLabel(labels map[string]string, retriever LabelRetriever) Transformer { + pairs := make(map[string]*clientmodel.LabelPair) + for k, v := range labels { + name, value := k, v + pairs[k] = &clientmodel.LabelPair{Name: &name, Value: &value} + } + return &label{ + labels: pairs, + retriever: retriever, + } +} + +func (t *label) Transform(family *clientmodel.MetricFamily) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + // lazily resolve the label retriever as needed + if t.retriever != nil && len(family.Metric) > 0 { + added, err := t.retriever.Labels() + if err != nil { + return false, err + } + t.retriever = nil + for k, v := range added { + name, value := k, v + t.labels[k] = &clientmodel.LabelPair{Name: &name, Value: &value} + } + } + for _, m := range family.Metric { + m.Label = appendLabels(m.Label, t.labels) + } + return true, nil +} + +func appendLabels(existing []*clientmodel.LabelPair, overrides 
map[string]*clientmodel.LabelPair) []*clientmodel.LabelPair { + var found []string + for i, pair := range existing { + name := pair.GetName() + if value, ok := overrides[name]; ok { + existing[i] = value + found = append(found, name) + } + } + for k, v := range overrides { + if !contains(found, k) { + existing = append(existing, v) + } + } + return existing +} + +func contains(values []string, s string) bool { + for _, v := range values { + if s == v { + return true + } + } + return false +} diff --git a/collectors/metrics/pkg/metricfamily/multi_transformer.go b/collectors/metrics/pkg/metricfamily/multi_transformer.go new file mode 100644 index 000000000..df0e103df --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/multi_transformer.go @@ -0,0 +1,42 @@ +package metricfamily + +import ( + clientmodel "github.com/prometheus/client_model/go" +) + +type MultiTransformer struct { + transformers []Transformer + builderFuncs []func() Transformer +} + +func (a *MultiTransformer) With(t Transformer) { + if t != nil { + a.transformers = append(a.transformers, t) + } +} + +func (a *MultiTransformer) WithFunc(f func() Transformer) { + a.builderFuncs = append(a.builderFuncs, f) +} + +func (a MultiTransformer) Transform(family *clientmodel.MetricFamily) (bool, error) { + var ts []Transformer + + for _, f := range a.builderFuncs { + ts = append(ts, f()) + } + + ts = append(ts, a.transformers...) + + for _, t := range ts { + ok, err := t.Transform(family) + if err != nil { + return false, err + } + if !ok { + return false, nil + } + } + + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/none.go b/collectors/metrics/pkg/metricfamily/none.go new file mode 100644 index 000000000..c815d186d --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/none.go @@ -0,0 +1,5 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +func None(*clientmodel.MetricFamily) (bool, error) { return true, nil } diff --git a/collectors/metrics/pkg/metricfamily/overwrite.go b/collectors/metrics/pkg/metricfamily/overwrite.go new file mode 100644 index 000000000..4e17411c5 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/overwrite.go @@ -0,0 +1,51 @@ +// Copyright Contributors to the Open Cluster Management project + +package metricfamily + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + client "github.com/prometheus/client_model/go" +) + +// driftRange is used to observe timestamps being older than 5min, newer than 5min, +// or within the present (+-5min) +const driftRange = 5 * time.Minute + +var ( + overwrittenMetrics = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "metricscollector_overwritten_timestamps_total", + Help: "Number of timestamps that were in the past, present or future", + }, []string{"tense"}) +) + +func init() { + prometheus.MustRegister(overwrittenMetrics) +} + +// OverwriteTimestamps sets all timestamps to the current time. 
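+// It also records, per sample, whether the original timestamp was in the past,
+// present or future (+/-5m relative to now) using the counter registered above.
+//
+// Illustrative sketch (not part of this change): OverwriteTimestamps(time.Now)
+// returns a TransformerFunc, so it can be combined with other transformers,
+// for example by passing it to a MultiTransformer's With method.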
+func OverwriteTimestamps(now func() time.Time) TransformerFunc { + return func(family *client.MetricFamily) (bool, error) { + timestamp := now().Unix() * 1000 + for i, m := range family.Metric { + observeDrift(now, m.GetTimestampMs()) + + family.Metric[i].TimestampMs = ×tamp + } + return true, nil + } +} + +func observeDrift(now func() time.Time, ms int64) { + timestamp := time.Unix(ms/1000, 0) + + if timestamp.Before(now().Add(-driftRange)) { + overwrittenMetrics.WithLabelValues("past").Inc() + } else if timestamp.After(now().Add(driftRange)) { + overwrittenMetrics.WithLabelValues("future").Inc() + } else { + overwrittenMetrics.WithLabelValues("present").Inc() + } + +} diff --git a/collectors/metrics/pkg/metricfamily/pack.go b/collectors/metrics/pkg/metricfamily/pack.go new file mode 100644 index 000000000..52e6b3b8a --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/pack.go @@ -0,0 +1,62 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +func PackMetrics(family *clientmodel.MetricFamily) (bool, error) { + metrics := family.Metric + j := len(metrics) + next := 0 +Found: + for i := 0; i < j; i++ { + if metrics[i] != nil { + continue + } + // scan for the next non-nil metric + if next <= i { + next = i + 1 + } + for k := next; k < j; k++ { + if metrics[k] == nil { + continue + } + // fill the current i with a non-nil metric + metrics[i], metrics[k] = metrics[k], nil + next = k + 1 + continue Found + } + // no more valid metrics + family.Metric = metrics[:i] + break + } + return len(family.Metric) > 0, nil +} + +// Pack returns only families with metrics in the returned array, preserving the +// order of the original slice. Nil entries are removed from the slice. The returned +// slice may be empty. +func Pack(families []*clientmodel.MetricFamily) []*clientmodel.MetricFamily { + j := len(families) + next := 0 +Found: + for i := 0; i < j; i++ { + if families[i] != nil && len(families[i].Metric) > 0 { + continue + } + // scan for the next non-nil family + if next <= i { + next = i + 1 + } + for k := next; k < j; k++ { + if families[k] == nil || len(families[k].Metric) == 0 { + continue + } + // fill the current i with a non-nil family + families[i], families[k] = families[k], nil + next = k + 1 + continue Found + } + // no more valid families + return families[:i] + } + return families +} diff --git a/collectors/metrics/pkg/metricfamily/rename.go b/collectors/metrics/pkg/metricfamily/rename.go new file mode 100644 index 000000000..3e9b25326 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/rename.go @@ -0,0 +1,17 @@ +package metricfamily + +import clientmodel "github.com/prometheus/client_model/go" + +type RenameMetrics struct { + Names map[string]string +} + +func (m RenameMetrics) Transform(family *clientmodel.MetricFamily) (bool, error) { + if family == nil || family.Name == nil { + return true, nil + } + if replace, ok := m.Names[*family.Name]; ok { + family.Name = &replace + } + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/required.go b/collectors/metrics/pkg/metricfamily/required.go new file mode 100644 index 000000000..94afa61b5 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/required.go @@ -0,0 +1,43 @@ +package metricfamily + +import ( + "fmt" + + clientmodel "github.com/prometheus/client_model/go" +) + +type requireLabel struct { + labels map[string]string +} + +func NewRequiredLabels(labels map[string]string) Transformer { + return requireLabel{labels: labels} +} + +var ( + ErrRequiredLabelMissing = 
fmt.Errorf("a required label is missing from the metric") +) + +func (t requireLabel) Transform(family *clientmodel.MetricFamily) (bool, error) { + for k, v := range t.labels { + Metrics: + for _, m := range family.Metric { + if m == nil { + continue + } + for _, label := range m.Label { + if label == nil { + continue + } + if label.GetName() == k { + if label.GetValue() != v { + return false, fmt.Errorf("expected label %s to have value %s instead of %s", label.GetName(), v, label.GetValue()) + } + continue Metrics + } + } + return false, ErrRequiredLabelMissing + } + } + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/sort.go b/collectors/metrics/pkg/metricfamily/sort.go new file mode 100644 index 000000000..87ee9ce6b --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/sort.go @@ -0,0 +1,128 @@ +package metricfamily + +import ( + "sort" + + clientmodel "github.com/prometheus/client_model/go" +) + +func SortMetrics(family *clientmodel.MetricFamily) (bool, error) { + sort.Sort(MetricsByTimestamp(family.Metric)) + return true, nil +} + +type MetricsByTimestamp []*clientmodel.Metric + +func (m MetricsByTimestamp) Len() int { + return len(m) +} + +func (m MetricsByTimestamp) Less(i int, j int) bool { + a, b := m[i], m[j] + if a == nil { + return b != nil + } + if b == nil { + return false + } + if a.TimestampMs == nil { + return b.TimestampMs != nil + } + if b.TimestampMs == nil { + return false + } + return *a.TimestampMs < *b.TimestampMs +} + +func (m MetricsByTimestamp) Swap(i int, j int) { + m[i], m[j] = m[j], m[i] +} + +// MergeSortedWithTimestamps collapses metrics families with the same name into a single family, +// preserving the order of the metrics. Families must be dense (no nils for families or metrics), +// all metrics must be sorted, and all metrics must have timestamps. +func MergeSortedWithTimestamps(families []*clientmodel.MetricFamily) []*clientmodel.MetricFamily { + var dst *clientmodel.MetricFamily + for pos, src := range families { + if dst == nil { + dst = src + continue + } + if dst.GetName() != src.GetName() { + dst = nil + continue + } + + lenI, lenJ := len(dst.Metric), len(src.Metric) + + // if the ranges don't overlap, we can block merge + dstBegin, dstEnd := *dst.Metric[0].TimestampMs, *dst.Metric[lenI-1].TimestampMs + srcBegin, srcEnd := *src.Metric[0].TimestampMs, *src.Metric[lenJ-1].TimestampMs + if dstEnd < srcBegin { + dst.Metric = append(dst.Metric, src.Metric...) + families[pos] = nil + continue + } + if srcEnd < dstBegin { + dst.Metric = append(src.Metric, dst.Metric...) 
+ families[pos] = nil + continue + } + + // zip merge + i, j := 0, 0 + result := make([]*clientmodel.Metric, 0, lenI+lenJ) + Merge: + for { + switch { + case j >= lenJ: + for ; i < lenI; i++ { + result = append(result, dst.Metric[i]) + } + break Merge + case i >= lenI: + for ; j < lenJ; j++ { + result = append(result, src.Metric[j]) + } + break Merge + default: + a, b := *dst.Metric[i].TimestampMs, *src.Metric[j].TimestampMs + if a <= b { + result = append(result, dst.Metric[i]) + i++ + } else { + result = append(result, src.Metric[j]) + j++ + } + } + } + dst.Metric = result + families[pos] = nil + } + return Pack(families) +} + +// PackedFamilyWithTimestampsByName sorts a packed slice of metrics +// (no nils, all families have at least one metric, and all metrics +// have timestamps) in order of metric name and then oldest sample +type PackedFamilyWithTimestampsByName []*clientmodel.MetricFamily + +func (families PackedFamilyWithTimestampsByName) Len() int { + return len(families) +} + +func (families PackedFamilyWithTimestampsByName) Less(i int, j int) bool { + a, b := families[i].GetName(), families[j].GetName() + if a < b { + return true + } + if a > b { + return false + } + tA, tB := *families[i].Metric[0].TimestampMs, *families[j].Metric[0].TimestampMs + return tA < tB +} + +func (families PackedFamilyWithTimestampsByName) Swap(i int, j int) { + families[i], families[j] = families[j], families[i] +} diff --git a/collectors/metrics/pkg/metricfamily/transform.go b/collectors/metrics/pkg/metricfamily/transform.go new file mode 100644 index 000000000..9d585425a --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/transform.go @@ -0,0 +1,41 @@ +package metricfamily + +import ( + clientmodel "github.com/prometheus/client_model/go" +) + +type Transformer interface { + Transform(*clientmodel.MetricFamily) (ok bool, err error) +} + +type TransformerFunc func(*clientmodel.MetricFamily) (ok bool, err error) + +func (f TransformerFunc) Transform(family *clientmodel.MetricFamily) (ok bool, err error) { + return f(family) +} + +// MetricsCount returns the number of unique metrics in the given families. It skips +// nil families but does not skip nil metrics. 
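+// Illustrative: for two non-nil families holding three and two Metric entries
+// respectively, MetricsCount returns 5, even if some of those entries are nil.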
+func MetricsCount(families []*clientmodel.MetricFamily) int { + count := 0 + for _, family := range families { + if family == nil { + continue + } + count += len(family.Metric) + } + return count +} + +func Filter(families []*clientmodel.MetricFamily, filter Transformer) error { + for i, family := range families { + ok, err := filter.Transform(family) + if err != nil { + return err + } + if !ok { + families[i] = nil + } + } + return nil +} diff --git a/collectors/metrics/pkg/metricfamily/transform_test.go b/collectors/metrics/pkg/metricfamily/transform_test.go new file mode 100644 index 000000000..270d03f49 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/transform_test.go @@ -0,0 +1,117 @@ +package metricfamily + +import ( + "reflect" + "testing" + + clientmodel "github.com/prometheus/client_model/go" +) + +func family(name string, timestamps ...int64) *clientmodel.MetricFamily { + families := &clientmodel.MetricFamily{Name: &name} + for i := range timestamps { + families.Metric = append(families.Metric, &clientmodel.Metric{TimestampMs: ×tamps[i]}) + } + return families +} + +func metric(timestamp int64) *clientmodel.Metric { + return &clientmodel.Metric{ + TimestampMs: ×tamp, + } +} + +func TestPack(t *testing.T) { + a := family("A", 0) + b := family("B", 1) + c := family("C", 2) + d := family("D") + + tests := []struct { + name string + args []*clientmodel.MetricFamily + want []*clientmodel.MetricFamily + }{ + {name: "empty", args: []*clientmodel.MetricFamily{nil, nil, nil}, want: []*clientmodel.MetricFamily{}}, + {name: "begin", args: []*clientmodel.MetricFamily{nil, a, b}, want: []*clientmodel.MetricFamily{a, b}}, + {name: "middle", args: []*clientmodel.MetricFamily{a, nil, b}, want: []*clientmodel.MetricFamily{a, b}}, + {name: "end", args: []*clientmodel.MetricFamily{a, b, nil}, want: []*clientmodel.MetricFamily{a, b}}, + {name: "skip", args: []*clientmodel.MetricFamily{a, nil, b, nil, c}, want: []*clientmodel.MetricFamily{a, b, c}}, + {name: "removes empty", args: []*clientmodel.MetricFamily{d, d}, want: []*clientmodel.MetricFamily{}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := Pack(tt.args); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Pack() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPackMetrics(t *testing.T) { + tests := []struct { + name string + args *clientmodel.MetricFamily + want *clientmodel.MetricFamily + wantOk bool + wantErr bool + }{ + {name: "empty", args: &clientmodel.MetricFamily{}, want: &clientmodel.MetricFamily{}}, + { + name: "all nil", + args: &clientmodel.MetricFamily{Metric: []*clientmodel.Metric{nil, nil}}, + want: &clientmodel.MetricFamily{Metric: []*clientmodel.Metric{}}, + }, + { + name: "leading nil", + args: &clientmodel.MetricFamily{Metric: []*clientmodel.Metric{nil, metric(1)}}, + want: &clientmodel.MetricFamily{Metric: []*clientmodel.Metric{metric(1)}}, + wantOk: true, + }, + { + name: "trailing nil", + args: &clientmodel.MetricFamily{Metric: []*clientmodel.Metric{metric(1), nil}}, + want: &clientmodel.MetricFamily{Metric: []*clientmodel.Metric{metric(1)}}, + wantOk: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, gotErr := PackMetrics(tt.args) + if got != tt.wantOk { + t.Errorf("PackMetrics() = %t, want %t", got, tt.wantOk) + } + if (gotErr != nil) != tt.wantErr { + t.Errorf("PackMetrics() = %v, want %t", gotErr, tt.wantErr) + } + if !reflect.DeepEqual(tt.args, tt.want) { + t.Errorf("PackMetrics() = %v, want %v", tt.args, tt.want) + } + }) + } +} 
+ +func TestMergeSort(t *testing.T) { + tests := []struct { + name string + args []*clientmodel.MetricFamily + want []*clientmodel.MetricFamily + }{ + {name: "empty", args: []*clientmodel.MetricFamily{}, want: []*clientmodel.MetricFamily{}}, + {name: "single", args: []*clientmodel.MetricFamily{family("A", 1)}, want: []*clientmodel.MetricFamily{family("A", 1)}}, + {name: "merge", args: []*clientmodel.MetricFamily{family("A", 1), family("A", 2)}, want: []*clientmodel.MetricFamily{family("A", 1, 2)}}, + {name: "reverse merge", args: []*clientmodel.MetricFamily{family("A", 2), family("A", 1)}, want: []*clientmodel.MetricFamily{family("A", 1, 2)}}, + {name: "differ", args: []*clientmodel.MetricFamily{family("A", 2), family("B", 1)}, want: []*clientmodel.MetricFamily{family("A", 2), family("B", 1)}}, + {name: "zip merge", args: []*clientmodel.MetricFamily{family("A", 2, 4, 6), family("A", 1, 3, 5)}, want: []*clientmodel.MetricFamily{family("A", 1, 2, 3, 4, 5, 6)}}, + {name: "zip merge - dst longer", args: []*clientmodel.MetricFamily{family("A", 2, 4, 6), family("A", 3)}, want: []*clientmodel.MetricFamily{family("A", 2, 3, 4, 6)}}, + {name: "zip merge - src longer", args: []*clientmodel.MetricFamily{family("A", 4), family("A", 1, 3, 5)}, want: []*clientmodel.MetricFamily{family("A", 1, 3, 4, 5)}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := MergeSortedWithTimestamps(tt.args); !reflect.DeepEqual(got, tt.want) { + t.Errorf("MergeSortedWithTimestamps() = %v, want %v", got, tt.want) + } + }) + } + +} diff --git a/collectors/metrics/pkg/metricfamily/unsorted.go b/collectors/metrics/pkg/metricfamily/unsorted.go new file mode 100644 index 000000000..4a2bf1ce8 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/unsorted.go @@ -0,0 +1,44 @@ +package metricfamily + +import ( + "fmt" + + clientmodel "github.com/prometheus/client_model/go" +) + +var ( + ErrUnsorted = fmt.Errorf("metrics in provided family are not in increasing timestamp order") + ErrNoTimestamp = fmt.Errorf("metrics in provided family do not have a timestamp") + ErrTimestampTooOld = fmt.Errorf("metrics in provided family have a timestamp that is too old, check clock skew") +) + +type errorOnUnsorted struct { + timestamp int64 + require bool +} + +func NewErrorOnUnsorted(requireTimestamp bool) Transformer { + return &errorOnUnsorted{ + require: requireTimestamp, + } +} + +func (t *errorOnUnsorted) Transform(family *clientmodel.MetricFamily) (bool, error) { + t.timestamp = 0 + for _, m := range family.Metric { + if m == nil { + continue + } + var ts int64 + if m.TimestampMs != nil { + ts = *m.TimestampMs + } else if t.require { + return false, ErrNoTimestamp + } + if ts < t.timestamp { + return false, ErrUnsorted + } + t.timestamp = ts + } + return true, nil +} diff --git a/collectors/metrics/pkg/metricfamily/whitelist.go b/collectors/metrics/pkg/metricfamily/whitelist.go new file mode 100644 index 000000000..aee099031 --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/whitelist.go @@ -0,0 +1,63 @@ +package metricfamily + +import ( + clientmodel "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +type allowlist [][]*labels.Matcher + +// NewAllowlist returns a Transformer that checks if at least one +// rule in the allowlist is true. +// This Transformer will nil metrics within a metric family that do not match a rule. +// Each given rule is transformed into a matchset. Matchsets are OR-ed. 
+// Individual matchers within a matchset are AND-ed, as in PromQL. +func NewAllowlist(rules []string) (Transformer, error) { + var ms [][]*labels.Matcher + for i := range rules { + matchers, err := parser.ParseMetricSelector(rules[i]) + if err != nil { + return nil, err + } + ms = append(ms, matchers) + } + return allowlist(ms), nil +} + +// Transform implements the Transformer interface. +func (t allowlist) Transform(family *clientmodel.MetricFamily) (bool, error) { + var ok bool +Metric: + for i, m := range family.Metric { + if m == nil { + continue + } + for _, matchset := range t { + if match(family.GetName(), m, matchset...) { + ok = true + continue Metric + } + } + family.Metric[i] = nil + } + return ok, nil +} + +// match checks whether every Matcher matches a given metric. +func match(name string, metric *clientmodel.Metric, matchers ...*labels.Matcher) bool { +Matcher: + for _, m := range matchers { + if m.Name == "__name__" && m.Matches(name) { + continue + } + for _, label := range metric.Label { + if label == nil || m.Name != label.GetName() || !m.Matches(label.GetValue()) { + continue + } + continue Matcher + } + return false + } + return true +} diff --git a/collectors/metrics/pkg/metricfamily/whitelist_test.go b/collectors/metrics/pkg/metricfamily/whitelist_test.go new file mode 100644 index 000000000..0ac268dda --- /dev/null +++ b/collectors/metrics/pkg/metricfamily/whitelist_test.go @@ -0,0 +1,206 @@ +package metricfamily + +import ( + "fmt" + "reflect" + "testing" + + clientmodel "github.com/prometheus/client_model/go" +) + +func familyWithLabels(name string, labels ...[]*clientmodel.LabelPair) *clientmodel.MetricFamily { + family := &clientmodel.MetricFamily{Name: &name} + time := int64(0) + for i := range labels { + family.Metric = append(family.Metric, &clientmodel.Metric{TimestampMs: &time, Label: labels[i]}) + } + return family +} + +func copyMetric(family *clientmodel.MetricFamily) *clientmodel.MetricFamily { + metric := make([]*clientmodel.Metric, len(family.Metric)) + copy(metric, family.Metric) + f := *family + f.Metric = metric + return &f +} + +func setNilMetric(family *clientmodel.MetricFamily, positions ...int) *clientmodel.MetricFamily { + f := copyMetric(family) + for _, position := range positions { + f.Metric[position] = nil + } + return f +} + +func TestAllowlist(t *testing.T) { + type checkFunc func(family *clientmodel.MetricFamily, ok bool, err error) error + + isOK := func(want bool) checkFunc { + return func(_ *clientmodel.MetricFamily, got bool, _ error) error { + if want != got { + return fmt.Errorf("want ok %t, got %t", want, got) + } + return nil + } + } + + hasErr := func(want error) checkFunc { + return func(_ *clientmodel.MetricFamily, _ bool, got error) error { + if want != got { + return fmt.Errorf("want err %v, got %v", want, got) + } + return nil + } + } + + deepEqual := func(want *clientmodel.MetricFamily) checkFunc { + return func(got *clientmodel.MetricFamily, _ bool, _ error) error { + if !reflect.DeepEqual(want, got) { + return fmt.Errorf("want metricfamily %v, got %v", want, got) + } + return nil + } + } + + strPnt := func(str string) *string { + return &str + } + + a := familyWithLabels("A", []*clientmodel.LabelPair{ + &clientmodel.LabelPair{ + Name: strPnt("method"), + Value: strPnt("POST"), + }, + }) + + b := familyWithLabels("B", []*clientmodel.LabelPair{ + &clientmodel.LabelPair{ + Name: strPnt("method"), + Value: strPnt("GET"), + }, + }) + + c := familyWithLabels("C", + []*clientmodel.LabelPair{ + &clientmodel.LabelPair{ + 
Name: strPnt("method"), + Value: strPnt("POST"), + }, + &clientmodel.LabelPair{ + Name: strPnt("status"), + Value: strPnt("200"), + }, + }, + []*clientmodel.LabelPair{ + &clientmodel.LabelPair{ + Name: strPnt("method"), + Value: strPnt("GET"), + }, + &clientmodel.LabelPair{ + Name: strPnt("status"), + Value: strPnt("200"), + }, + }, + []*clientmodel.LabelPair{ + &clientmodel.LabelPair{ + Name: strPnt("method"), + Value: strPnt("POST"), + }, + &clientmodel.LabelPair{ + Name: strPnt("status"), + Value: strPnt("500"), + }, + }, + []*clientmodel.LabelPair{ + &clientmodel.LabelPair{ + Name: strPnt("method"), + Value: strPnt("DELETE"), + }, + &clientmodel.LabelPair{ + Name: strPnt("status"), + Value: strPnt("200"), + }, + }, + ) + + for _, tc := range []struct { + name string + checks []checkFunc + family *clientmodel.MetricFamily + allowlister Transformer + }{ + { + name: "accept A", + family: a, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(a)}, + allowlister: mustMakeAllowlist(t, []string{"{__name__=\"A\"}"}), + }, + { + name: "reject B", + family: b, + checks: []checkFunc{isOK(false), hasErr(nil), deepEqual(setNilMetric(b, 0))}, + allowlister: mustMakeAllowlist(t, []string{"{__name__=\"A\"}"}), + }, + { + name: "accept C", + family: c, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(c)}, + allowlister: mustMakeAllowlist(t, []string{"{__name__=\"C\"}"}), + }, + { + name: "reject C", + family: c, + checks: []checkFunc{isOK(false), hasErr(nil), deepEqual(setNilMetric(c, 0, 1, 2, 3))}, + allowlister: mustMakeAllowlist(t, []string{"{method=\"PUT\"}"}), + }, + { + name: "reject parts of C", + family: c, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(setNilMetric(c, 0, 2, 3))}, + allowlister: mustMakeAllowlist(t, []string{"{__name__=\"C\",method=\"GET\"}"}), + }, + { + name: "reject different parts of C", + family: c, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(setNilMetric(c, 2))}, + allowlister: mustMakeAllowlist(t, []string{"{status=\"200\"}"}), + }, + { + name: "multiple rules", + family: c, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(setNilMetric(c, 0, 3))}, + allowlister: mustMakeAllowlist(t, []string{"{method=\"GET\"}", "{status=\"500\"}"}), + }, + { + name: "multiple rules complex", + family: c, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(setNilMetric(c, 0, 1, 3))}, + allowlister: mustMakeAllowlist(t, []string{"{method=\"GET\",status=\"400\"}", "{status=\"500\"}"}), + }, + { + name: "multiple rules complex with rejection", + family: c, + checks: []checkFunc{isOK(true), hasErr(nil), deepEqual(setNilMetric(c, 1, 2))}, + allowlister: mustMakeAllowlist(t, []string{"{method=\"POST\",status=\"200\"}", "{method=\"DELETE\"}"}), + }, + } { + t.Run(tc.name, func(t *testing.T) { + f := copyMetric(tc.family) + ok, err := tc.allowlister.Transform(f) + for _, check := range tc.checks { + if err := check(f, ok, err); err != nil { + t.Error(err) + } + } + }) + } +} + +func mustMakeAllowlist(t *testing.T, rules []string) Transformer { + w, err := NewAllowlist(rules) + if err != nil { + t.Fatalf("failed to create new allowlist transformer: %v", err) + } + return w +} diff --git a/collectors/metrics/pkg/metricsclient/metricsclient.go b/collectors/metrics/pkg/metricsclient/metricsclient.go new file mode 100644 index 000000000..e62f0743c --- /dev/null +++ b/collectors/metrics/pkg/metricsclient/metricsclient.go @@ -0,0 +1,577 @@ +// Copyright Contributors to the Open Cluster Management project + +package metricsclient + +import ( + "bytes" + 
"context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/cenkalti/backoff" + "github.com/go-kit/kit/log" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + clientmodel "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/promql" + + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/reader" +) + +const ( + nameLabelName = "__name__" + maxSeriesLength = 10000 +) + +var ( + gaugeRequestRetrieve = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "metricsclient_request_retrieve", + Help: "Tracks the number of metrics retrievals", + }, []string{"client", "status_code"}) + gaugeRequestSend = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "metricsclient_request_send", + Help: "Tracks the number of metrics sends", + }, []string{"client", "status_code"}) +) + +func init() { + prometheus.MustRegister( + gaugeRequestRetrieve, gaugeRequestSend, + ) +} + +type Client struct { + client *http.Client + maxBytes int64 + timeout time.Duration + metricsName string + logger log.Logger +} + +type PartitionedMetrics struct { + Families []*clientmodel.MetricFamily +} + +func New(logger log.Logger, client *http.Client, maxBytes int64, timeout time.Duration, metricsName string) *Client { + return &Client{ + client: client, + maxBytes: maxBytes, + timeout: timeout, + metricsName: metricsName, + logger: log.With(logger, "component", "metricsclient"), + } +} + +type MetricsJson struct { + Status string `json:"status"` + Data MetricsData `json:"data"` +} + +type MetricsData struct { + Type string `json:"resultType"` + Result []MetricsResult `json:"result"` +} + +type MetricsResult struct { + Metric map[string]string `json:"metric"` + Value []interface{} `json:"value"` +} + +func (c *Client) RetrievRecordingMetrics(ctx context.Context, req *http.Request, name string) ([]*clientmodel.MetricFamily, error) { + + ctx, cancel := context.WithTimeout(ctx, c.timeout) + req = req.WithContext(ctx) + defer cancel() + families := make([]*clientmodel.MetricFamily, 0, 100) + err := withCancel(ctx, c.client, req, func(resp *http.Response) error { + switch resp.StatusCode { + case http.StatusOK: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "200").Inc() + case http.StatusUnauthorized: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "401").Inc() + return fmt.Errorf("Prometheus server requires authentication: %s", resp.Request.URL) + case http.StatusForbidden: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "403").Inc() + return fmt.Errorf("Prometheus server forbidden: %s", resp.Request.URL) + case http.StatusBadRequest: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "400").Inc() + return fmt.Errorf("bad request: %s", resp.Request.URL) + default: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, strconv.Itoa(resp.StatusCode)).Inc() + return fmt.Errorf("Prometheus server reported unexpected error code: %d", resp.StatusCode) + } + + decoder := json.NewDecoder(resp.Body) + var data MetricsJson + err := decoder.Decode(&data) + if err != nil { + logger.Log(c.logger, logger.Error, "msg", "failed to decode", "err", err) + return nil + 
} + vec := make(promql.Vector, 0, 100) + for _, r := range data.Data.Result { + var t int64 + var v float64 + t = int64(r.Value[0].(float64) * 1000) + v, _ = strconv.ParseFloat(r.Value[1].(string), 64) + ls := []labels.Label{} + for k, v := range r.Metric { + l := &labels.Label{ + Name: k, + Value: v, + } + ls = append(ls, *l) + } + vec = append(vec, promql.Sample{ + Metric: ls, + Point: promql.Point{T: t, V: v}, + }) + } + + for _, s := range vec { + protMetric := &clientmodel.Metric{ + Untyped: &clientmodel.Untyped{}, + } + protMetricFam := &clientmodel.MetricFamily{ + Type: clientmodel.MetricType_UNTYPED.Enum(), + Name: proto.String(name), + } + for _, l := range s.Metric { + if l.Value == "" { + // No value means unset. Never consider those labels. + // This is also important to protect against nameless metrics. + continue + } + protMetric.Label = append(protMetric.Label, &clientmodel.LabelPair{ + Name: proto.String(l.Name), + Value: proto.String(l.Value), + }) + } + + protMetric.TimestampMs = proto.Int64(s.T) + protMetric.Untyped.Value = proto.Float64(s.V) + + protMetricFam.Metric = append(protMetricFam.Metric, protMetric) + families = append(families, protMetricFam) + } + + return nil + }) + if err != nil { + return nil, err + } + + return families, nil +} + +func (c *Client) Retrieve(ctx context.Context, req *http.Request) ([]*clientmodel.MetricFamily, error) { + if req.Header == nil { + req.Header = make(http.Header) + } + req.Header.Set("Accept", strings.Join([]string{string(expfmt.FmtProtoDelim), string(expfmt.FmtText)}, " , ")) + + ctx, cancel := context.WithTimeout(ctx, c.timeout) + req = req.WithContext(ctx) + defer cancel() + + families := make([]*clientmodel.MetricFamily, 0, 100) + err := withCancel(ctx, c.client, req, func(resp *http.Response) error { + switch resp.StatusCode { + case http.StatusOK: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "200").Inc() + case http.StatusUnauthorized: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "401").Inc() + return fmt.Errorf("Prometheus server requires authentication: %s", resp.Request.URL) + case http.StatusForbidden: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "403").Inc() + return fmt.Errorf("Prometheus server forbidden: %s", resp.Request.URL) + case http.StatusBadRequest: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, "400").Inc() + return fmt.Errorf("bad request: %s", resp.Request.URL) + default: + gaugeRequestRetrieve.WithLabelValues(c.metricsName, strconv.Itoa(resp.StatusCode)).Inc() + return fmt.Errorf("Prometheus server reported unexpected error code: %d", resp.StatusCode) + } + + // read the response into memory + format := expfmt.ResponseFormat(resp.Header) + r := &reader.LimitedReader{R: resp.Body, N: c.maxBytes} + decoder := expfmt.NewDecoder(r, format) + for { + family := &clientmodel.MetricFamily{} + families = append(families, family) + if err := decoder.Decode(family); err != nil { + if err != io.EOF { + logger.Log(c.logger, logger.Error, "msg", "error reading body", "err", err) + } + break + } + } + + return nil + }) + if err != nil { + return nil, err + } + + return families, nil +} + +func (c *Client) Send(ctx context.Context, req *http.Request, families []*clientmodel.MetricFamily) error { + buf := &bytes.Buffer{} + if err := Write(buf, families); err != nil { + return err + } + + if req.Header == nil { + req.Header = make(http.Header) + } + req.Header.Set("Content-Type", string(expfmt.FmtProtoDelim)) + req.Header.Set("Content-Encoding", "snappy") + req.Body = 
ioutil.NopCloser(buf) + + ctx, cancel := context.WithTimeout(ctx, c.timeout) + req = req.WithContext(ctx) + defer cancel() + logger.Log(c.logger, logger.Debug, "msg", "start to send") + return withCancel(ctx, c.client, req, func(resp *http.Response) error { + defer func() { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + logger.Log(c.logger, logger.Error, "msg", "error copying body", "err", err) + } + if err := resp.Body.Close(); err != nil { + logger.Log(c.logger, logger.Error, "msg", "error closing body", "err", err) + } + }() + logger.Log(c.logger, logger.Debug, "msg", resp.StatusCode) + switch resp.StatusCode { + case http.StatusOK: + gaugeRequestSend.WithLabelValues(c.metricsName, "200").Inc() + case http.StatusUnauthorized: + gaugeRequestSend.WithLabelValues(c.metricsName, "401").Inc() + return fmt.Errorf("gateway server requires authentication: %s", resp.Request.URL) + case http.StatusForbidden: + gaugeRequestSend.WithLabelValues(c.metricsName, "403").Inc() + return fmt.Errorf("gateway server forbidden: %s", resp.Request.URL) + case http.StatusBadRequest: + gaugeRequestSend.WithLabelValues(c.metricsName, "400").Inc() + logger.Log(c.logger, logger.Debug, "msg", resp.Body) + return fmt.Errorf("gateway server bad request: %s", resp.Request.URL) + default: + gaugeRequestSend.WithLabelValues(c.metricsName, strconv.Itoa(resp.StatusCode)).Inc() + body, _ := ioutil.ReadAll(resp.Body) + if len(body) > 1024 { + body = body[:1024] + } + return fmt.Errorf("gateway server reported unexpected error code: %d: %s", resp.StatusCode, string(body)) + } + + return nil + }) +} + +func Read(r io.Reader) ([]*clientmodel.MetricFamily, error) { + decompress := snappy.NewReader(r) + decoder := expfmt.NewDecoder(decompress, expfmt.FmtProtoDelim) + families := make([]*clientmodel.MetricFamily, 0, 100) + for { + family := &clientmodel.MetricFamily{} + if err := decoder.Decode(family); err != nil { + if err == io.EOF { + break + } + return nil, err + } + families = append(families, family) + } + return families, nil +} + +func Write(w io.Writer, families []*clientmodel.MetricFamily) error { + // output the filtered set + compress := snappy.NewBufferedWriter(w) + encoder := expfmt.NewEncoder(compress, expfmt.FmtProtoDelim) + for _, family := range families { + if family == nil { + continue + } + if err := encoder.Encode(family); err != nil { + return err + } + } + if err := compress.Flush(); err != nil { + return err + } + return nil +} + +func withCancel(ctx context.Context, client *http.Client, req *http.Request, fn func(*http.Response) error) error { + resp, err := client.Do(req) + defer func() error { + if resp != nil { + if err = resp.Body.Close(); err != nil { + return err + } + } + return nil + }() + if err != nil { + return err + } + + done := make(chan struct{}) + go func() { + err = fn(resp) + close(done) + }() + + select { + case <-ctx.Done(): + closeErr := resp.Body.Close() + + // wait for the goroutine to finish. + <-done + + // err is propagated from the goroutine above + // if it is nil, we bubble up the close err, if any. + if err == nil { + err = closeErr + } + + // if there is no close err, + // we propagate the context context error. + if err == nil { + err = ctx.Err() + } + case <-done: + // propagate the err from the spawned goroutine, if any. 
+ } + + return err +} + +func MTLSTransport(logger log.Logger) (*http.Transport, error) { + testMode := os.Getenv("UNIT_TEST") != "" + caCertFile := "/tlscerts/ca/ca.crt" + tlsKeyFile := "/tlscerts/certs/tls.key" + tlsCrtFile := "/tlscerts/certs/tls.crt" + if testMode { + caCertFile = "../../testdata/tls/ca.crt" + tlsKeyFile = "../../testdata/tls/tls.key" + tlsCrtFile = "../../testdata/tls/tls.crt" + } + // Load Server CA cert + caCert, err := ioutil.ReadFile(caCertFile) + if err != nil { + return nil, errors.Wrap(err, "failed to load server ca cert file") + } + // Load client cert signed by Client CA + cert, err := tls.LoadX509KeyPair(tlsCrtFile, tlsKeyFile) + if err != nil { + return nil, errors.Wrap(err, "failed to load client ca cert") + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + // Setup HTTPS client + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, + } + return &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: true, + TLSClientConfig: tlsConfig, + }, nil + +} + +func DefaultTransport(logger log.Logger, isTLS bool) *http.Transport { + return &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: true, + } +} + +func convertToTimeseries(p *PartitionedMetrics, now time.Time) ([]prompb.TimeSeries, error) { + var timeseries []prompb.TimeSeries + + timestamp := now.UnixNano() / int64(time.Millisecond) + for _, f := range p.Families { + for _, m := range f.Metric { + var ts prompb.TimeSeries + + labelpairs := []prompb.Label{{ + Name: nameLabelName, + Value: *f.Name, + }} + + for _, l := range m.Label { + labelpairs = append(labelpairs, prompb.Label{ + Name: *l.Name, + Value: *l.Value, + }) + } + + s := prompb.Sample{ + Timestamp: *m.TimestampMs, + } + // If the sample is in the future, overwrite it. + if *m.TimestampMs > timestamp { + s.Timestamp = timestamp + } + + switch *f.Type { + case clientmodel.MetricType_COUNTER: + s.Value = *m.Counter.Value + case clientmodel.MetricType_GAUGE: + s.Value = *m.Gauge.Value + case clientmodel.MetricType_UNTYPED: + s.Value = *m.Untyped.Value + default: + return nil, fmt.Errorf("metric type %s not supported", f.Type.String()) + } + + ts.Labels = append(ts.Labels, labelpairs...) 
+ ts.Samples = append(ts.Samples, s) + + timeseries = append(timeseries, ts) + } + } + + return timeseries, nil +} + +// RemoteWrite is used to push the metrics to remote thanos endpoint +func (c *Client) RemoteWrite(ctx context.Context, req *http.Request, + families []*clientmodel.MetricFamily, interval time.Duration) error { + + timeseries, err := convertToTimeseries(&PartitionedMetrics{Families: families}, time.Now()) + if err != nil { + msg := "failed to convert timeseries" + logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) + return fmt.Errorf(msg) + } + + if len(timeseries) == 0 { + logger.Log(c.logger, logger.Info, "msg", "no time series to forward to receive endpoint") + return nil + } + logger.Log(c.logger, logger.Debug, "timeseries number", len(timeseries)) + + //uncomment here to generate timeseries + /* + for i := 0; i < len(families); i++ { + var buff bytes.Buffer + textEncoder := expfmt.NewEncoder(&buff, expfmt.FmtText) + err = textEncoder.Encode(families[i]) + if err != nil { + logger.Log(c.logger, logger.Error, "unexpected error during encode", err.Error()) + } + fmt.Println(string(buff.Bytes())) + } + */ + + for i := 0; i < len(timeseries); i += maxSeriesLength { + length := len(timeseries) + if i+maxSeriesLength < length { + length = i + maxSeriesLength + } + subTimeseries := timeseries[i:length] + + wreq := &prompb.WriteRequest{Timeseries: subTimeseries} + data, err := proto.Marshal(wreq) + if err != nil { + msg := "failed to marshal proto" + logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) + return fmt.Errorf(msg) + } + compressed := snappy.Encode(nil, data) + + // retry RemoteWrite with exponential back-off + b := backoff.NewExponentialBackOff() + // Do not set max elapsed time more than half the scrape interval + halfInterval := len(timeseries) * 2 / maxSeriesLength + if halfInterval < 2 { + halfInterval = 2 + } + b.MaxElapsedTime = interval / time.Duration(halfInterval) + retryable := func() error { + return c.sendRequest(req.URL.String(), compressed) + } + notify := func(err error, t time.Duration) { + msg := fmt.Sprintf("error: %v happened at time: %v", err, t) + logger.Log(c.logger, logger.Warn, "msg", msg) + } + err = backoff.RetryNotify(retryable, b, notify) + if err != nil { + return err + } + } + msg := fmt.Sprintf("Metrics pushed successfully") + logger.Log(c.logger, logger.Info, "msg", msg) + return nil +} + +func (c *Client) sendRequest(serverURL string, body []byte) error { + req1, err := http.NewRequest(http.MethodPost, serverURL, bytes.NewBuffer(body)) + if err != nil { + msg := "failed to create forwarding request" + logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) + return fmt.Errorf(msg) + } + + //req.Header.Add("THANOS-TENANT", tenantID) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + req1 = req1.WithContext(ctx) + + resp, err := c.client.Do(req1) + if err != nil { + msg := "failed to forward request" + logger.Log(c.logger, logger.Warn, "msg", msg, "err", err) + return fmt.Errorf(msg) + } + + if resp.StatusCode/100 != 2 { + // surfacing upstreams error to our users too + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + logger.Log(c.logger, logger.Warn, err) + } + bodyString := string(bodyBytes) + msg := fmt.Sprintf("response status code is %s, response body is %s", resp.Status, bodyString) + logger.Log(c.logger, logger.Warn, msg) + if resp.StatusCode != http.StatusConflict { + return fmt.Errorf(msg) + } + } + return nil +} diff --git 
a/collectors/metrics/pkg/metricsclient/metricsclient_test.go b/collectors/metrics/pkg/metricsclient/metricsclient_test.go new file mode 100644 index 000000000..fb6a503c4 --- /dev/null +++ b/collectors/metrics/pkg/metricsclient/metricsclient_test.go @@ -0,0 +1,211 @@ +// Copyright Contributors to the Open Cluster Management project +package metricsclient + +import ( + "fmt" + "net/http" + "reflect" + "testing" + "time" + + "github.com/go-kit/kit/log" + clientmodel "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/prompb" +) + +func TestDefaultTransport(t *testing.T) { + logger := log.NewNopLogger() + want := &http.Transport{ + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: true, + } + http := DefaultTransport(logger, true) + if http.Dial == nil || reflect.TypeOf(http) != reflect.TypeOf(want) { + t.Errorf("Default transport doesn't match expected format") + } + +} + +func Test_convertToTimeseries(t *testing.T) { + counter := clientmodel.MetricType_COUNTER + untyped := clientmodel.MetricType_UNTYPED + gauge := clientmodel.MetricType_GAUGE + + fooMetricName := "foo_metric" + fooHelp := "foo help text" + fooLabelName := "foo" + fooLabelValue1 := "bar" + fooLabelValue2 := "baz" + + barMetricName := "bar_metric" + barHelp := "bar help text" + barLabelName := "bar" + barLabelValue1 := "baz" + + value42 := 42.0 + value50 := 50.0 + timestamp := int64(1596948588956) //15615582020000) + now := time.Now() + nowTimestamp := now.UnixNano() / int64(time.Millisecond) + fmt.Println("timestamp: ", timestamp) + + fmt.Println("nowTimestamp: ", nowTimestamp) + tests := []struct { + name string + in *PartitionedMetrics + want []prompb.TimeSeries + }{{ + name: "counter", + in: &PartitionedMetrics{ + Families: []*clientmodel.MetricFamily{{ + Name: &fooMetricName, + Help: &fooHelp, + Type: &counter, + Metric: []*clientmodel.Metric{{ + Label: []*clientmodel.LabelPair{{Name: &fooLabelName, Value: &fooLabelValue1}}, + Counter: &clientmodel.Counter{Value: &value42}, + TimestampMs: &timestamp, + }, { + Label: []*clientmodel.LabelPair{{Name: &fooLabelName, Value: &fooLabelValue2}}, + Counter: &clientmodel.Counter{Value: &value50}, + TimestampMs: &timestamp, + }}, + }, { + Name: &barMetricName, + Help: &barHelp, + Type: &counter, + Metric: []*clientmodel.Metric{{ + Label: []*clientmodel.LabelPair{{Name: &barLabelName, Value: &barLabelValue1}}, + Counter: &clientmodel.Counter{Value: &value42}, + TimestampMs: &timestamp, + }}, + }}, + }, + want: []prompb.TimeSeries{{ + Labels: []prompb.Label{{Name: nameLabelName, Value: fooMetricName}, {Name: fooLabelName, Value: fooLabelValue1}}, + Samples: []prompb.Sample{{Value: value42, Timestamp: timestamp}}, + }, { + Labels: []prompb.Label{{Name: nameLabelName, Value: fooMetricName}, {Name: fooLabelName, Value: fooLabelValue2}}, + Samples: []prompb.Sample{{Value: value50, Timestamp: timestamp}}, + }, { + Labels: []prompb.Label{{Name: nameLabelName, Value: barMetricName}, {Name: barLabelName, Value: barLabelValue1}}, + Samples: []prompb.Sample{{Value: value42, Timestamp: timestamp}}, + }}, + }, { + name: "gauge", + in: &PartitionedMetrics{ + Families: []*clientmodel.MetricFamily{{ + Name: &fooMetricName, + Help: &fooHelp, + Type: &gauge, + Metric: []*clientmodel.Metric{{ + Label: []*clientmodel.LabelPair{{Name: &fooLabelName, Value: &fooLabelValue1}}, + Gauge: &clientmodel.Gauge{Value: &value42}, + TimestampMs: &timestamp, + }, { + Label: []*clientmodel.LabelPair{{Name: &fooLabelName, Value: &fooLabelValue2}}, + Gauge: &clientmodel.Gauge{Value: &value50}, + 
TimestampMs: &timestamp, + }}, + }, { + Name: &barMetricName, + Help: &barHelp, + Type: &gauge, + Metric: []*clientmodel.Metric{{ + Label: []*clientmodel.LabelPair{{Name: &barLabelName, Value: &barLabelValue1}}, + Gauge: &clientmodel.Gauge{Value: &value42}, + TimestampMs: &timestamp, + }}, + }}, + }, + want: []prompb.TimeSeries{{ + Labels: []prompb.Label{{Name: nameLabelName, Value: fooMetricName}, {Name: fooLabelName, Value: fooLabelValue1}}, + Samples: []prompb.Sample{{Value: value42, Timestamp: timestamp}}, + }, { + Labels: []prompb.Label{{Name: nameLabelName, Value: fooMetricName}, {Name: fooLabelName, Value: fooLabelValue2}}, + Samples: []prompb.Sample{{Value: value50, Timestamp: timestamp}}, + }, { + Labels: []prompb.Label{{Name: nameLabelName, Value: barMetricName}, {Name: barLabelName, Value: barLabelValue1}}, + Samples: []prompb.Sample{{Value: value42, Timestamp: timestamp}}, + }}, + }, { + name: "untyped", + in: &PartitionedMetrics{ + Families: []*clientmodel.MetricFamily{{ + Name: &fooMetricName, + Help: &fooHelp, + Type: &untyped, + Metric: []*clientmodel.Metric{{ + Label: []*clientmodel.LabelPair{{Name: &fooLabelName, Value: &fooLabelValue1}}, + Untyped: &clientmodel.Untyped{Value: &value42}, + TimestampMs: &timestamp, + }, { + Label: []*clientmodel.LabelPair{{Name: &fooLabelName, Value: &fooLabelValue2}}, + Untyped: &clientmodel.Untyped{Value: &value50}, + TimestampMs: &timestamp, + }}, + }, { + Name: &barMetricName, + Help: &barHelp, + Type: &untyped, + Metric: []*clientmodel.Metric{{ + Label: []*clientmodel.LabelPair{{Name: &barLabelName, Value: &barLabelValue1}}, + Untyped: &clientmodel.Untyped{Value: &value42}, + TimestampMs: &timestamp, + }}, + }}, + }, + want: []prompb.TimeSeries{{ + Labels: []prompb.Label{{Name: nameLabelName, Value: fooMetricName}, {Name: fooLabelName, Value: fooLabelValue1}}, + Samples: []prompb.Sample{{Value: value42, Timestamp: timestamp}}, + }, { + Labels: []prompb.Label{{Name: nameLabelName, Value: fooMetricName}, {Name: fooLabelName, Value: fooLabelValue2}}, + Samples: []prompb.Sample{{Value: value50, Timestamp: timestamp}}, + }, { + Labels: []prompb.Label{{Name: nameLabelName, Value: barMetricName}, {Name: barLabelName, Value: barLabelValue1}}, + Samples: []prompb.Sample{{Value: value42, Timestamp: timestamp}}, + }}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + out, err := convertToTimeseries(tt.in, now) + if err != nil { + t.Errorf("converting timeseries errored: %v", err) + } + if ok, err := timeseriesEqual(tt.want, out); !ok { + // t.Error("want: ", tt.want) + // t.Error("out: ", out) + + t.Errorf("timeseries don't match: %v", err) + } + }) + } +} + +func timeseriesEqual(t1 []prompb.TimeSeries, t2 []prompb.TimeSeries) (bool, error) { + if len(t1) != len(t2) { + return false, fmt.Errorf("timeseries don't match amount of series: %d != %d", len(t1), len(t2)) + } + + for i, t := range t1 { + for j, l := range t.Labels { + if t2[i].Labels[j].Name != l.Name { + return false, fmt.Errorf("label names don't match: %s != %s", t2[i].Labels[j].Name, l.Name) + } + if t2[i].Labels[j].Value != l.Value { + return false, fmt.Errorf("label values don't match: %s != %s", t2[i].Labels[j].Value, l.Value) + } + } + + for j, s := range t.Samples { + if t2[i].Samples[j].Timestamp != s.Timestamp { + return false, fmt.Errorf("sample timestamps don't match: %d != %d", t2[i].Samples[j].Timestamp, s.Timestamp) + } + if t2[i].Samples[j].Value != s.Value { + return false, fmt.Errorf("sample values don't match: %f != %f", t2[i].Samples[j].Value, s.Value) + } + } + } + 
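+ // every label and sample compared equal pairwise, so the two series sets match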
+ return true, nil +} diff --git a/collectors/metrics/pkg/reader/reader.go b/collectors/metrics/pkg/reader/reader.go new file mode 100644 index 000000000..f7f8c558a --- /dev/null +++ b/collectors/metrics/pkg/reader/reader.go @@ -0,0 +1,50 @@ +package reader + +import ( + "fmt" + "io" +) + +type limitReadCloser struct { + io.Reader + closer io.ReadCloser +} + +func NewLimitReadCloser(r io.ReadCloser, n int64) io.ReadCloser { + return limitReadCloser{ + Reader: LimitReader(r, n), + closer: r, + } +} + +func (c limitReadCloser) Close() error { + return c.closer.Close() +} + +var ErrTooLong = fmt.Errorf("the incoming sample data is too long") + +// LimitReader returns a Reader that reads from r +// but stops with ErrTooLong after n bytes. +// The underlying implementation is a *LimitedReader. +func LimitReader(r io.Reader, n int64) io.Reader { return &LimitedReader{r, n} } + +// A LimitedReader reads from R but limits the amount of +// data returned to just N bytes. Each call to Read +// updates N to reflect the new amount remaining. +// Read returns ErrTooLong when N <= 0 or when the underlying R returns EOF. +type LimitedReader struct { + R io.Reader // underlying reader + N int64 // max bytes remaining +} + +func (l *LimitedReader) Read(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, ErrTooLong + } + if int64(len(p)) > l.N { + p = p[0:l.N] + } + n, err = l.R.Read(p) + l.N -= int64(n) + return +} diff --git a/collectors/metrics/pkg/reader/reader_test.go b/collectors/metrics/pkg/reader/reader_test.go new file mode 100644 index 000000000..4c46da2fb --- /dev/null +++ b/collectors/metrics/pkg/reader/reader_test.go @@ -0,0 +1,46 @@ +package reader + +import ( + "bytes" + "io" + "strings" + "testing" +) + +func TestRead(t *testing.T) { + tests := []struct { + name string + input string + expectedString string + }{ + { + name: "Read full strings", + input: "Hello", + expectedString: "Hello", + }, + { + name: "Cut the excess and drop the rest", + input: "Hello world", + expectedString: "Hello wo", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &LimitedReader{R: strings.NewReader(tt.input), N: 8} + b := make([]byte, 8) + for { + _, err := r.Read(b) + if err == io.EOF { + break + } else if err == ErrTooLong { + break + } + } + + if strings.Compare(string(bytes.Trim(b, "\x00")), tt.expectedString) != 0 { + t.Errorf("%v is not equal to the expected: %v", string(b), tt.expectedString) + } + }) + } +} diff --git a/collectors/metrics/pkg/simulator/simulator.go b/collectors/metrics/pkg/simulator/simulator.go new file mode 100644 index 000000000..3c99ed526 --- /dev/null +++ b/collectors/metrics/pkg/simulator/simulator.go @@ -0,0 +1,111 @@ +// Copyright Contributors to the Open Cluster Management project + +package simulator + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "math/big" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-kit/kit/log" + + clientmodel "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + rlogger "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" +) + +const ( + defaultMetrisNumber = 1000 + defaultLabelNumber = 19 + metricsNamePrefix = "simulated_metrics" + labelPrefix = "label" + labelValuePrefix = "label-value-prefix" +) + +func SimulateMetrics(logger log.Logger) []*clientmodel.MetricFamily { + metrisNumber, err := strconv.Atoi(os.Getenv("SIMULATE_METRICS_NUM")) + if err != nil { + metrisNumber = defaultMetrisNumber + } + 
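// the label count per simulated series defaults to defaultLabelNumber and can be overridden via SIMULATE_LABEL_NUM + 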
labelNumber, err := strconv.Atoi(os.Getenv("SIMULATE_LABEL_NUM")) + if err != nil { + labelNumber = defaultLabelNumber + } + + families := make([]*clientmodel.MetricFamily, 0, 100) + timestamp := time.Now().UnixNano() / int64(time.Millisecond) + var sb strings.Builder + for i := 0; i < metrisNumber; i++ { + sb.WriteString(fmt.Sprintf("%s_%d{", metricsNamePrefix, i/1000)) + for j := 0; j < labelNumber; j++ { + if j == 0 { + sb.WriteString(fmt.Sprintf("%s_%d=\"%s-%d--%d\"", labelPrefix, j, labelValuePrefix, i/10, i%10)) + } else { + sb.WriteString(fmt.Sprintf("%s_%d=\"%s-%d\"", labelPrefix, j, labelValuePrefix, i%10)) + } + + if j != labelNumber-1 { + sb.WriteString(",") + } + } + sb.WriteString("} ") + sb.WriteString(fmt.Sprintf("%f %d", randFloat64(), timestamp)) + sb.WriteString("\n") + } + //rlogger.Log(logger, rlogger.Error, "data", sb.String()) + r := ioutil.NopCloser(bytes.NewReader([]byte(sb.String()))) + decoder := expfmt.NewDecoder(r, expfmt.FmtText) + for { + family := &clientmodel.MetricFamily{} + families = append(families, family) + if err := decoder.Decode(family); err != nil { + if err != io.EOF { + rlogger.Log(logger, rlogger.Error, "msg", "error reading body", "err", err) + } + break + } + } + + return families +} + +func randFloat64() float64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(1<<62)) + if err != nil { + return 0 + } + + return (float64(nBig.Int64()) / float64(1<<62)) +} + +func FetchSimulatedTimeseries(timeseriesFile string) ([]*clientmodel.MetricFamily, error) { + timestamp := time.Now().UnixNano() / int64(time.Millisecond) + + reader, err := os.Open(filepath.Clean(timeseriesFile)) + if err != nil { + return nil, err + } + + var parser expfmt.TextParser + + parsed, err := parser.TextToMetricFamilies(reader) + if err != nil { + return nil, err + } + var families []*clientmodel.MetricFamily + for _, mf := range parsed { + for _, m := range mf.Metric { + m.TimestampMs = &timestamp + } + families = append(families, mf) + } + return families, nil +} diff --git a/collectors/metrics/pkg/simulator/simulator_test.go b/collectors/metrics/pkg/simulator/simulator_test.go new file mode 100644 index 000000000..6839b9cfc --- /dev/null +++ b/collectors/metrics/pkg/simulator/simulator_test.go @@ -0,0 +1,14 @@ +// Copyright Contributors to the Open Cluster Management project + +package simulator + +import ( + "testing" +) + +func TestFetchSimulatedTimeseries(t *testing.T) { + _, err := FetchSimulatedTimeseries("../../testdata/timeseries.txt") + if err != nil { + t.Fatal(err) + } +} diff --git a/collectors/metrics/pkg/status/status.go b/collectors/metrics/pkg/status/status.go new file mode 100644 index 000000000..0c62a111a --- /dev/null +++ b/collectors/metrics/pkg/status/status.go @@ -0,0 +1,131 @@ +// Copyright Contributors to the Open Cluster Management project + +package status + +import ( + "context" + "errors" + "os" + "time" + + "github.com/go-kit/kit/log" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" +) + +const ( + name = "observability-addon" + namespace = "open-cluster-management-addon-observability" +) + +type StatusReport struct { + statusClient 
client.Client + logger log.Logger +} + +func New(logger log.Logger) (*StatusReport, error) { + testMode := os.Getenv("UNIT_TEST") != "" + standaloneMode := os.Getenv("STANDALONE") == "true" + var kubeClient client.Client + if testMode { + kubeClient = fake.NewFakeClient() + } else if standaloneMode { + kubeClient = nil + } else { + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + return nil, errors.New("Failed to create the kube config") + } + s := scheme.Scheme + if err := oav1beta1.AddToScheme(s); err != nil { + return nil, errors.New("Failed to add observabilityaddon into scheme") + } + kubeClient, err = client.New(config, client.Options{Scheme: s}) + if err != nil { + return nil, errors.New("Failed to create the kube client") + } + } + + return &StatusReport{ + statusClient: kubeClient, + logger: log.With(logger, "component", "statusclient"), + }, nil +} + +func (s *StatusReport) UpdateStatus(t string, r string, m string) error { + if s.statusClient == nil { + return nil + } + addon := &oav1beta1.ObservabilityAddon{} + err := s.statusClient.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: namespace, + }, addon) + if err != nil { + logger.Log(s.logger, logger.Error, "err", err) + return err + } + update := false + found := false + conditions := []oav1beta1.StatusCondition{} + lastestC := oav1beta1.StatusCondition{} + for _, c := range addon.Status.Conditions { + if c.Status == metav1.ConditionTrue { + if c.Type != t { + c.Status = metav1.ConditionFalse + } else { + found = true + if c.Reason != r || c.Message != m { + c.Reason = r + c.Message = m + c.LastTransitionTime = metav1.NewTime(time.Now()) + update = true + lastestC = c + continue + } + } + } else { + if c.Type == t { + found = true + c.Status = metav1.ConditionTrue + c.Reason = r + c.Message = m + c.LastTransitionTime = metav1.NewTime(time.Now()) + update = true + lastestC = c + continue + } + } + conditions = append(conditions, c) + } + if update { + conditions = append(conditions, lastestC) + } + if !found { + conditions = append(conditions, oav1beta1.StatusCondition{ + Type: t, + Status: metav1.ConditionTrue, + Reason: r, + Message: m, + LastTransitionTime: metav1.NewTime(time.Now()), + }) + update = true + } + if update { + addon.Status.Conditions = conditions + err = s.statusClient.Status().Update(context.TODO(), addon) + if err != nil { + logger.Log(s.logger, logger.Error, "err", err) + } + return err + } + + return nil +} diff --git a/collectors/metrics/pkg/status/status_test.go b/collectors/metrics/pkg/status/status_test.go new file mode 100644 index 000000000..ec747efe5 --- /dev/null +++ b/collectors/metrics/pkg/status/status_test.go @@ -0,0 +1,65 @@ +// Copyright Contributors to the Open Cluster Management project +package status + +import ( + "context" + "os" + "testing" + "time" + + "github.com/go-kit/kit/log" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" +) + +func init() { + os.Setenv("UNIT_TEST", "true") + s := scheme.Scheme + _ = oav1beta1.AddToScheme(s) +} + +func TestUpdateStatus(t *testing.T) { + s, err := New(log.NewNopLogger()) + if err != nil { + t.Fatalf("Failed to create new Status struct: (%v)", err) + } + + addon := &oav1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: oav1beta1.ObservabilityAddonStatus{ + Conditions: 
[]oav1beta1.StatusCondition{ + { + Type: "Ready", + Status: metav1.ConditionTrue, + Reason: "Deployed", + Message: "Metrics collector deployed and functional", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, + }, + } + err = s.statusClient.Create(context.TODO(), addon) + if err != nil { + t.Fatalf("Failed to create observabilityAddon: (%v)", err) + } + + err = s.UpdateStatus("Disabled", "Disabled", "enableMetrics is set to False") + if err != nil { + t.Fatalf("Failed to update status: (%v)", err) + } + + err = s.UpdateStatus("Ready", "Deployed", "Metrics collector deployed and functional") + if err != nil { + t.Fatalf("Failed to update status: (%v)", err) + } + + err = s.UpdateStatus("Ready", "Deployed", "Metrics collector deployed and updated") + if err != nil { + t.Fatalf("Failed to update status: (%v)", err) + } +} diff --git a/collectors/metrics/test/integration/clean.sh b/collectors/metrics/test/integration/clean.sh new file mode 100755 index 000000000..b01fe3a00 --- /dev/null +++ b/collectors/metrics/test/integration/clean.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +WORKDIR=`pwd` + +delete_kind_hub() { + echo "====Delete kind cluster=====" + kind delete cluster --name hub + rm $HOME/.kube/kind-config-hub > /dev/null 2>&1 +} + +delete_command_binaries(){ + cd ${WORKDIR} + echo "Current directory" + echo $(pwd) + rm ./kind > /dev/null 2>&1 + rm ./kubectl > /dev/null 2>&1 +} + +delete_kind_hub +delete_command_binaries \ No newline at end of file diff --git a/collectors/metrics/test/integration/kind/kind-hub.config.yaml b/collectors/metrics/test/integration/kind/kind-hub.config.yaml new file mode 100644 index 000000000..d0322b655 --- /dev/null +++ b/collectors/metrics/test/integration/kind/kind-hub.config.yaml @@ -0,0 +1,14 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 80 + hostPort: 80 + listenAddress: "0.0.0.0" + - containerPort: 443 + hostPort: 443 + listenAddress: "0.0.0.0" + - containerPort: 6443 + hostPort: 32806 + listenAddress: "0.0.0.0" diff --git a/collectors/metrics/test/integration/manifests/client-serving-certs-ca-bundle.yaml b/collectors/metrics/test/integration/manifests/client-serving-certs-ca-bundle.yaml new file mode 100644 index 000000000..e149d7a86 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/client-serving-certs-ca-bundle.yaml @@ -0,0 +1,9 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: client-serving-certs-ca-bundle + namespace: open-cluster-management-observability + annotations: + service.beta.openshift.io/inject-cabundle: 'true' +data: + service-ca.crt: "" diff --git a/collectors/metrics/test/integration/manifests/deployment.yaml b/collectors/metrics/test/integration/manifests/deployment.yaml new file mode 100644 index 000000000..8d4a245d6 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/deployment.yaml @@ -0,0 +1,136 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-collector + name: metrics-collector + namespace: open-cluster-management-observability +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: metrics-collector + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + labels: + k8s-app: metrics-collector + spec: + containers: + - command: + - /usr/bin/metrics-collector + - 
--from=$(FROM) + - --from-ca-file=/etc/serving-certs-ca-bundle/service-ca.crt + - --from-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token + - --to-upload=$(TO) + - --listen=localhost:8080 + - --match={__name__="up"} + - --match={__name__=":node_memory_MemAvailable_bytes:sum"} + - --match={__name__="cluster:capacity_cpu_cores:sum"} + - --match={__name__="cluster:capacity_memory_bytes:sum"} + - --match={__name__="cluster:container_cpu_usage:ratio"} + - --match={__name__="cluster:container_spec_cpu_shares:ratio"} + - --match={__name__="cluster:cpu_usage_cores:sum"} + - --match={__name__="cluster:memory_usage:ratio"} + - --match={__name__="cluster:memory_usage_bytes:sum"} + - --match={__name__="cluster:usage:resources:sum"} + - --match={__name__="cluster_infrastructure_provider"} + - --match={__name__="cluster_version"} + - --match={__name__="cluster_version_payload"} + - --match={__name__="container_cpu_cfs_throttled_periods_total"} + - --match={__name__="container_memory_cache"} + - --match={__name__="container_memory_rss"} + - --match={__name__="container_memory_swap"} + - --match={__name__="container_memory_working_set_bytes"} + - --match={__name__="container_network_receive_bytes_total"} + - --match={__name__="container_network_receive_packets_dropped_total"} + - --match={__name__="container_network_receive_packets_total"} + - --match={__name__="container_network_transmit_bytes_total"} + - --match={__name__="container_network_transmit_packets_dropped_total"} + - --match={__name__="container_network_transmit_packets_total"} + - --match={__name__="haproxy_backend_connections_total"} + - --match={__name__="instance:node_cpu_utilisation:rate1m"} + - --match={__name__="instance:node_load1_per_cpu:ratio"} + - --match={__name__="instance:node_memory_utilisation:ratio"} + - --match={__name__="instance:node_network_receive_bytes_excluding_lo:rate1m"} + - --match={__name__="instance:node_network_receive_drop_excluding_lo:rate1m"} + - --match={__name__="instance:node_network_transmit_bytes_excluding_lo:rate1m"} + - --match={__name__="instance:node_network_transmit_drop_excluding_lo:rate1m"} + - --match={__name__="instance:node_num_cpu:sum"} + - --match={__name__="instance:node_vmstat_pgmajfault:rate1m"} + - --match={__name__="instance_device:node_disk_io_time_seconds:rate1m"} + - --match={__name__="instance_device:node_disk_io_time_weighted_seconds:rate1m"} + - --match={__name__="kube_node_status_allocatable_cpu_cores"} + - --match={__name__="kube_node_status_allocatable_memory_bytes"} + - --match={__name__="kube_pod_container_resource_limits_cpu_cores"} + - --match={__name__="kube_pod_container_resource_limits_memory_bytes"} + - --match={__name__="kube_pod_container_resource_requests_cpu_cores"} + - --match={__name__="kube_pod_container_resource_requests_memory_bytes"} + - --match={__name__="kube_pod_info"} + - --match={__name__="kube_resourcequota"} + - --match={__name__="machine_cpu_cores"} + - --match={__name__="machine_memory_bytes"} + - --match={__name__="mixin_pod_workload"} + - --match={__name__="node_cpu_seconds_total"} + - --match={__name__="node_filesystem_avail_bytes"} + - --match={__name__="node_filesystem_size_bytes"} + - --match={__name__="node_memory_MemAvailable_bytes"} + - --match={__name__="node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate"} + - --match={__name__="node_namespace_pod_container:container_memory_cache"} + - --match={__name__="node_namespace_pod_container:container_memory_rss"} + - 
--match={__name__="node_namespace_pod_container:container_memory_swap"} + - --match={__name__="node_namespace_pod_container:container_memory_working_set_bytes"} + - --match={__name__="node_netstat_Tcp_OutSegs"} + - --match={__name__="node_netstat_Tcp_RetransSegs"} + - --match={__name__="node_netstat_TcpExt_TCPSynRetrans"} + - --recordingrule={"name":"apiserver_request_duration_seconds:histogram_quantile_99","query":"histogram_quantile(0.99,sum(rate(apiserver_request_duration_seconds_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (verb,le))"} + - --recordingrule={"name":"apiserver_request_duration_seconds:histogram_quantile_90","query":"histogram_quantile(0.90,sum(rate(apiserver_request_duration_seconds_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (verb,le))"} + - --limit-bytes=52428800 + - --log-level=debug + - --label="cluster=func_e2e_test_travis-blue0/metrics-client:0.1.0-blue0/metrics-client:0.1.0" + env: + - name: ANONYMIZE_LABELS + - name: FROM + value: http://prometheus-k8s.openshift-monitoring.svc:9090 + - name: TO + value: https://observability-observatorium-observatorium-api.open-cluster-management-observability.svc.cluster.local:8443/api/metrics/v1/test-mtls/api/v1/receive + - name: HTTP_PROXY + - name: HTTPS_PROXY + - name: NO_PROXY + image: {{ METRICS_COLLECTOR_IMAGE }} + imagePullPolicy: Always + name: metrics-collector + ports: + - containerPort: 8080 + name: http + protocol: TCP + resources: + requests: + cpu: 1m + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - name: observability-managed-cluster-certs + readOnly: true + mountPath: /tlscerts/certs + - name: observability-managed-cluster-certs + readOnly: true + mountPath: /tlscerts/ca + - mountPath: /etc/serving-certs-ca-bundle + name: serving-certs-ca-bundle + readOnly: false + imagePullSecrets: + - name: multiclusterhub-operator-pull-secret + volumes: + - name: observability-managed-cluster-certs + secret: + secretName: observability-managed-cluster-certs + - configMap: + name: client-serving-certs-ca-bundle + name: serving-certs-ca-bundle diff --git a/collectors/metrics/test/integration/manifests/metrics-collector-cert.yaml b/collectors/metrics/test/integration/manifests/metrics-collector-cert.yaml new file mode 100644 index 000000000..2053a7fea --- /dev/null +++ b/collectors/metrics/test/integration/manifests/metrics-collector-cert.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +data: + ca.crt: >- + 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhRENDQWxDZ0F3SUJBZ0lSQU9kMnNLMHIzUnlLQlJPM2NZcDhYSmt3RFFZSktvWklodmNOQVFFTEJRQXcKUlRFVk1CTUdBMVVFQ2hNTVkyVnlkQzF0WVc1aFoyVnlNU3d3S2dZRFZRUURFeU52WW5ObGNuWmhZbWxzYVhSNQpMWE5sY25abGNpMWpZUzFqWlhKMGFXWnBZMkYwWlRBZUZ3MHlNVEF5TWpVeU1EUTVNRFJhRncweU5qQXlNalF5Ck1EUTVNRFJhTUVVeEZUQVRCZ05WQkFvVERHTmxjblF0YldGdVlXZGxjakVzTUNvR0ExVUVBeE1qYjJKelpYSjIKWVdKcGJHbDBlUzF6WlhKMlpYSXRZMkV0WTJWeWRHbG1hV05oZEdVd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQQpBNElCRHdBd2dnRUtBb0lCQVFEek9NVTJXTjY5MDloa3JZT05kK0xPWE03UEx3anJNTzV5S2N1NEJNallEejloClhKM3d4cXJzRmRuRjZmcmZaVVlEMjZVaXBra1ZEaWQ0S2VQZ0prZHV2Uk0zZXJ2RHRtUjR5UkhBQXVMNnc3c3gKbFp0YmtmdlZUcTE4T1N5MEJuQzB3Y3dPR0pwdW9FSVNuWU42UGlEQjVycHpST0p3WDNBaXZxZVRJYjlzaGs0Uwo4NURxemRRZzdyZHpZeEdFbjZXUXFQSUVha3lIcWJETnpVNXJMZWIwcGE5TEFrN0NNS3c4Q0w1eXpyYzFDaHhyCkhidU9qQUtXckk4RnNqa2d6WWZ1ajhoQ2FyZ1I3MzYxR09XQzkxRGxramlrSTA4S05hUTFHR0U0Wk0xVWxiM2oKMDlSd0gxMVhlYXIwalBOL2dIYWdhdkVSTDVoTjIxRTR6NzJkUmsvMUFnTUJBQUdqVXpCUk1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01DNEdBMVVkRVFRbk1DV0NJMjlpYzJWeWRtRmlhV3hwCmRIa3RjMlZ5ZG1WeUxXTmhMV05sY25ScFptbGpZWFJsTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFEUjdQQm8KUWlxbW9HWTlqMG9UUmxJUCs1N0Y1UGd1OEF0bVlXUitnNE4vcW5vdVdVZ1ZaU085SG9abTBOWE43MjhPNk9wUwowbWxYcWcyRkptalFCQm96TnNvYUJpR0xVdmEvMFhDQTI2YWJtNU0vL3lHTTE0MXNnRFNvNmR6WjFNdS9wdUFiCmNxd09kaFpFVGhNMHZ0cTdwbGhTa1FwY1BBNjlBTGhTUUxRSW1MVFFaS1JyVkFSa3hvMExNOGpHSjVMSlZXd0oKUnBTS1pQdm9UYVNneUU2dkZEdmF4cGRuazZWMFVkRHNZSFVyK0Rsai9BcHgxOEQ3K0RMQnIwUXd4Y0NNWTVQWgpHWkNWU2hmVEpYMVcrMHlwL3VmWGlMNWYxeUpMaTZuMmxlZVNWdkEvWlRlMTg0TmYrZ2tpaHprVExObGhibWRKCjl3Rmo5TGFNRUdCVUhYRVQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.crt: >- + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURUVENDQWpXZ0F3SUJBZ0lSQUwrekJrTVNKZGFic1VySzg0enNLNTh3RFFZSktvWklodmNOQVFFTEJRQXcKUlRFVk1CTUdBMVVFQ2hNTVkyVnlkQzF0WVc1aFoyVnlNU3d3S2dZRFZRUURFeU52WW5ObGNuWmhZbWxzYVhSNQpMV05zYVdWdWRDMWpZUzFqWlhKMGFXWnBZMkYwWlRBZUZ3MHlNVEF5TWpVeU1EUTVNVEZhRncweU1qQXlNalV5Ck1EUTVNVEZhTUVBeEZUQVRCZ05WQkFvVERHTmxjblF0YldGdVlXZGxjakVNTUFvR0ExVUVDeE1EWVdOdE1Sa3cKRndZRFZRUURFeEJ0WXkxc2IyTmhiQzFqYkhWemRHVnlNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QQpNSUlCQ2dLQ0FRRUExNEhmcXlNTDhZWjBsYlNzV1R0ZzdKK3cwQW9XSk84Ung1bkxpREdwWHVYdHhKcGFBeGVPCktzQ2NrYVFNaU1sZTJvTmZMT2pGNDhPTUZ6WHhmdm12TEpSNkJ2RmdsbTZkckxqbHpyRFdZU1ZhNmtrb3VYelQKajMrK0FLeHlNcTEvUVhWTGlRc1RSSWovaDhLbUV0QjNGakJpS2oybXc5MUtnM29iRWF2ZkFweHVXSUtwSkJDMwpNYXFaVHVTcldHeEJrMmljRDhFdnFUb2ZHR2VkR2Z0OEI4dkozN3YyeDY2V0FHOXhLbFp0N09LRVM2Ukc4aFF1CnRncVNGSnRtYi9PNkI0cVJyV3E5Vi9NaUthRXhzaDNhazJLbCttdmFFS0Z3dWtnbkk2aXVHRmxvTEhYYlVYdWQKU05INmUxNFhLM0xtUGZSb25KUlhjZHc3KzZCM0QvZ1krd0lEQVFBQm96MHdPekFPQmdOVkhROEJBZjhFQkFNQwpCYUF3REFZRFZSMFRBUUgvQkFJd0FEQWJCZ05WSFJFRUZEQVNnaEJ0WXkxc2IyTmhiQzFqYkhWemRHVnlNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFBNHhXNUNKa291L25YcFZ4b0dPdDBsV2VCWmZGU0VTQzlqTitnTXFXOHIKUWJTV3M2eDNHZnZpNFJaaHJNK1NZSFRMQmZFM1pKTUl0ZEtzTDBvOER3ZGhpQ05CeCttblV1aW9adU8xcHczWAp4RGlaR1UwY2J5RGZCclZFT2kvRWxjbHFoOWU2SHNNc0hSU2N2NTZQMGdkdVRvL3kxYWlnMkluOTFCcW51N1EzCkZKZWFNejBESHZ1SDZydEY2UnNmKzFmdDVYVFAzUGJySy9Qa2RSRkp5Q3Z4NW1lb0YyUSs4ZkVTWkVDR3BKeHAKYVEzSU9vYVVzNFpNc2NWMG9yakRNRjJNT29lbnVuQjdXZjBSWXQvMXQyOGtzUUlqVzF0SUZwZG9JZStpb3ZmdAp6QzM4Mk1mUEZwaTQ1Vk9RNWMzZ2lWTkg1RDg3ektURy9xNXBodCtWZWVJSQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.key: >- + 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBMTRIZnF5TUw4WVowbGJTc1dUdGc3Sit3MEFvV0pPOFJ4NW5MaURHcFh1WHR4SnBhCkF4ZU9Lc0Nja2FRTWlNbGUyb05mTE9qRjQ4T01Gelh4ZnZtdkxKUjZCdkZnbG02ZHJMamx6ckRXWVNWYTZra28KdVh6VGozKytBS3h5TXExL1FYVkxpUXNUUklqL2g4S21FdEIzRmpCaUtqMm13OTFLZzNvYkVhdmZBcHh1V0lLcApKQkMzTWFxWlR1U3JXR3hCazJpY0Q4RXZxVG9mR0dlZEdmdDhCOHZKMzd2Mng2NldBRzl4S2xadDdPS0VTNlJHCjhoUXV0Z3FTRkp0bWIvTzZCNHFScldxOVYvTWlLYUV4c2gzYWsyS2wrbXZhRUtGd3VrZ25JNml1R0Zsb0xIWGIKVVh1ZFNOSDZlMTRYSzNMbVBmUm9uSlJYY2R3Nys2QjNEL2dZK3dJREFRQUJBb0lCQUcxdXJxUlpmSlFkd1kvUAptZ0VKelBMN09DNjhJSGlLeVkyQXhFQ1piemNZTEwrNzNWOWFrNytUNVMxemowWGFjNGNrVkswYVducUk3dGRPCmRoNFRMZWZHVVp5NjcyQVVOWTdpcVJkbTVzV1gwVzNNenBnNkViZjVsd01Dams2N21UWWxHTUV6Vllwa3d0VXUKZTJSc1pUeG9QMERSNDUvci9BMTljTWR0YmlZRldBNTdTdkJXRUxpdHRMOHR4T0xwWjhRbnhjejlOd1c4TWVWSAozSy94ZGYxejBsRG10RTZXeUVtL2RFWk55ekxJOUlyVGVFbFBIWVJpUHQ0cGNCT25zMVVmSDc0RzN0dGxyWjFBCmk1anVZMURUM21ublNZKy9PYnZ3K29LTEY3NXA0ejBOcW53UzhIUk43bEgyT1lqRm5CdUUxa3RQVkVVYitkTlcKdnpYaDBtRUNnWUVBMkdodlY5alI0YWpnbDVIbDhVMjVQdmRVVXQ2d29hWW9nSE1BZU9PMUhLbFdzWk9qVTdJegpRMFRmVDUwNjBjWE5zNkFEdHc5M0F2Q1hndlFrVWFmeU0xVlBMREVWbGlsYmNHQVc3L2FXQ3RHcEkvcGpURkZoCmgvU3dvNkcybFJGSGc2dUV0V3ZVTS9IZkNzY3ZsT1dNL1ViWTNjOFFMVld1Y3Y1dE1TcEtKTXNDZ1lFQS91OUIKNG93YjRQZW9pbjFLQlZMTWx5SUdlRzd4ZmY4YkFlZ3ZCazRQSUE5Zk1pOU5pM00rMG9KazREQ0hFaVRoUzdEeApVVVVIZWJzNFh1c2JlL25ZL29ndVc4NnJPY3UvTytvL1hRV2lBcXdIdnl4R2gyYUVrYlRXOTBxcmtBcnRPNDJMCkNIdVo4Z3E0ZTg4U0VzenNrcHVhOG8wS1VXS0NoTmljTStaRmhwRUNnWUJnb2NHV0tHc1BxS2toQlRUNEd3eXMKNFNQMkhSb2lGRi9oZTNpcWdvTk0yUkN0R28vRHY3N0VPdXk4VUNrRG9wL0hwWndERWhUQ2EwdzhYV2dhVGpRSQpWLytTYkJpSGhOQWZ6Y2h3UGx1aFdSMTl3MEN6dzJNWm9sbDl0NmdmeUNkMjFBL2VNdGYrbWZKT2pjTGhhZDhhCitaZTg4OGoxZFYwQVhEdkZ5R3h6SVFLQmdRRFNtTjJwZzZySUptN0JzUXlENTBtQmxDY2FPRFFnckt3bjRDRncKcU8xUHc4TVZWd1JOTGEvNUFLVEttdXdKN2hON2pybGJOSllmVnEwVHBFNzVGSVhRaHJrZE9GWTBrWDd4eE9vQgpNMVVKdGdsVVFRUW1lR2NOWXdHdUdhNHVlUWhQQ1BlUTAySnlFNjhoQUFkakpveEw5R1dFOTBSTnhQNGpOam1iCm1oOVhNUUtCZ0FHZnNPRWxxdUFqbFQ2WFcvMWhJVGUydXpwbHJtdGliQmw0Wlo3bkMyYXBXa3VHSERWejBmbHIKR1YzaUZhcmlrUkFpR2N4MklMdjJNQkMyWE9XQVdZbyt6VVhtd25hWWtLRFlibkNMU0tFalI1RFVBSVRHQThwWgpoRTIyRTVIb2M3VDhseFpyaXZvTGNmcGxJS3V4eGR4QUhFWmxDcVhtMUU4UkNsMmhRS1k0Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +type: Opaque +kind: Secret +metadata: + name: observability-managed-cluster-certs + namespace: open-cluster-management-observability diff --git a/collectors/metrics/test/integration/manifests/observatorium-api-configmap.yaml b/collectors/metrics/test/integration/manifests/observatorium-api-configmap.yaml new file mode 100644 index 000000000..0d79ee9b6 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/observatorium-api-configmap.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +data: + rbac.yaml: |- + "roleBindings": + - "name": "test-mtls" + "roles": + - "write" + "subjects": + - "kind": "user" + "name": "up" + - "kind": "group" + "name": "acm" + "roles": + - "name": "write" + "permissions": + - "write" + "resources": + - "metrics" + "tenants": + - "test-mtls" +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: latest + name: observability-observatorium-observatorium-api + namespace: open-cluster-management-observability diff --git a/collectors/metrics/test/integration/manifests/observatorium-api-secret.yaml b/collectors/metrics/test/integration/manifests/observatorium-api-secret.yaml new file mode 
100644 index 000000000..2976ec8ea --- /dev/null +++ b/collectors/metrics/test/integration/manifests/observatorium-api-secret.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +data: + tenants.yaml: dGVuYW50czoKLSBuYW1lOiB0ZXN0LW10bHMKICBpZDogODQ1Y2RmZDktZjkzNi00NDNjLTk3OWMtMmVlN2RjOTFmNjQ2CiAgbVRMUzoKICAgIGNhUGF0aDogL2V0Yy9vYnNlcnZhdG9yaXVtMi9jYS5wZW0K +kind: Secret +metadata: + creationTimestamp: "2020-09-06T04:11:53Z" + labels: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: latest + name: observability-observatorium-observatorium-api + namespace: open-cluster-management-observability +type: Opaque diff --git a/collectors/metrics/test/integration/manifests/observatorium-api-service.yaml b/collectors/metrics/test/integration/manifests/observatorium-api-service.yaml new file mode 100644 index 000000000..853e761e5 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/observatorium-api-service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: latest + name: observability-observatorium-observatorium-api + namespace: open-cluster-management-observability +spec: + ports: + - name: internal + port: 8081 + protocol: TCP + targetPort: 8081 + - name: public + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + sessionAffinity: None + type: ClusterIP diff --git a/collectors/metrics/test/integration/manifests/observatorium-api.yaml b/collectors/metrics/test/integration/manifests/observatorium-api.yaml new file mode 100644 index 000000000..06186d02e --- /dev/null +++ b/collectors/metrics/test/integration/manifests/observatorium-api.yaml @@ -0,0 +1,92 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + generation: 1 + labels: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: latest + name: observability-observatorium-observatorium-api + namespace: open-cluster-management-observability +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: api + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: observatorium-api + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: latest + spec: + containers: + - args: + - --web.listen=0.0.0.0:8443 + - --web.internal.listen=0.0.0.0:8448 + - --metrics.read.endpoint=http://observability-observatorium-thanos-query-frontend.open-cluster-management-observability.svc.cluster.local:9090 + - --metrics.write.endpoint=http://observability-observatorium-thanos-receive-default:19291 + - --log.level=warn 
+ - --rbac.config=/etc/observatorium/rbac.yaml + - --tenants.config=/etc/observatorium/tenants.yaml + - --tls.server.cert-file=/etc/observatorium2/server.pem + - --tls.server.key-file=/etc/observatorium2/server.key + - --tls.healthchecks.server-ca-file=/etc/observatorium2/server-ca.pem + image: quay.io/observatorium/observatorium:latest + imagePullPolicy: Always + name: observatorium-api + ports: + - containerPort: 8081 + name: internal + protocol: TCP + - containerPort: 8443 + name: public + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/observatorium/rbac.yaml + name: rbac + readOnly: true + subPath: rbac.yaml + - mountPath: /etc/observatorium/tenants.yaml + name: tenants + readOnly: true + subPath: tenants.yaml + - mountPath: /etc/observatorium2 + name: observatorium-certs + readOnly: true + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + volumes: + - configMap: + defaultMode: 420 + name: observability-observatorium-observatorium-api + name: rbac + - name: tenants + secret: + defaultMode: 420 + secretName: observability-observatorium-observatorium-api + - name: observatorium-certs + secret: + defaultMode: 420 + secretName: observatorium-certs \ No newline at end of file diff --git a/collectors/metrics/test/integration/manifests/observatorium-ca-cert.yaml b/collectors/metrics/test/integration/manifests/observatorium-ca-cert.yaml new file mode 100644 index 000000000..2ea9f2e4d --- /dev/null +++ b/collectors/metrics/test/integration/manifests/observatorium-ca-cert.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +data: + server-ca.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhRENDQWxDZ0F3SUJBZ0lSQU9kMnNLMHIzUnlLQlJPM2NZcDhYSmt3RFFZSktvWklodmNOQVFFTEJRQXcKUlRFVk1CTUdBMVVFQ2hNTVkyVnlkQzF0WVc1aFoyVnlNU3d3S2dZRFZRUURFeU52WW5ObGNuWmhZbWxzYVhSNQpMWE5sY25abGNpMWpZUzFqWlhKMGFXWnBZMkYwWlRBZUZ3MHlNVEF5TWpVeU1EUTVNRFJhRncweU5qQXlNalF5Ck1EUTVNRFJhTUVVeEZUQVRCZ05WQkFvVERHTmxjblF0YldGdVlXZGxjakVzTUNvR0ExVUVBeE1qYjJKelpYSjIKWVdKcGJHbDBlUzF6WlhKMlpYSXRZMkV0WTJWeWRHbG1hV05oZEdVd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQQpBNElCRHdBd2dnRUtBb0lCQVFEek9NVTJXTjY5MDloa3JZT05kK0xPWE03UEx3anJNTzV5S2N1NEJNallEejloClhKM3d4cXJzRmRuRjZmcmZaVVlEMjZVaXBra1ZEaWQ0S2VQZ0prZHV2Uk0zZXJ2RHRtUjR5UkhBQXVMNnc3c3gKbFp0YmtmdlZUcTE4T1N5MEJuQzB3Y3dPR0pwdW9FSVNuWU42UGlEQjVycHpST0p3WDNBaXZxZVRJYjlzaGs0Uwo4NURxemRRZzdyZHpZeEdFbjZXUXFQSUVha3lIcWJETnpVNXJMZWIwcGE5TEFrN0NNS3c4Q0w1eXpyYzFDaHhyCkhidU9qQUtXckk4RnNqa2d6WWZ1ajhoQ2FyZ1I3MzYxR09XQzkxRGxramlrSTA4S05hUTFHR0U0Wk0xVWxiM2oKMDlSd0gxMVhlYXIwalBOL2dIYWdhdkVSTDVoTjIxRTR6NzJkUmsvMUFnTUJBQUdqVXpCUk1BNEdBMVVkRHdFQgovd1FFQXdJQ3BEQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01DNEdBMVVkRVFRbk1DV0NJMjlpYzJWeWRtRmlhV3hwCmRIa3RjMlZ5ZG1WeUxXTmhMV05sY25ScFptbGpZWFJsTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFEUjdQQm8KUWlxbW9HWTlqMG9UUmxJUCs1N0Y1UGd1OEF0bVlXUitnNE4vcW5vdVdVZ1ZaU085SG9abTBOWE43MjhPNk9wUwowbWxYcWcyRkptalFCQm96TnNvYUJpR0xVdmEvMFhDQTI2YWJtNU0vL3lHTTE0MXNnRFNvNmR6WjFNdS9wdUFiCmNxd09kaFpFVGhNMHZ0cTdwbGhTa1FwY1BBNjlBTGhTUUxRSW1MVFFaS1JyVkFSa3hvMExNOGpHSjVMSlZXd0oKUnBTS1pQdm9UYVNneUU2dkZEdmF4cGRuazZWMFVkRHNZSFVyK0Rsai9BcHgxOEQ3K0RMQnIwUXd4Y0NNWTVQWgpHWkNWU2hmVEpYMVcrMHlwL3VmWGlMNWYxeUpMaTZuMmxlZVNWdkEvWlRlMTg0TmYrZ2tpaHprVExObGhibWRKCjl3Rmo5TGFNRUdCVUhYRVQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBbzIyaVRPa0JibzlTNEJjWndLamJUMTg4R1BoL3JyenBRUy8vWjhpa0k1ZkY5MDcrCnNBSHlpRDFNT0JSTWtWVzRGbEtMQ0FveTFQUWwzVDRObHBYbDZackpFeFRCQ2FQNlhPcURqbHArOTVIcFhpdGIKdDFpQzF6ZXRLbnNMNkdoa0U3L1Azekt6SXkzV1drTXNNQWxLMTMrUmNneno3clBRRW9kTlNDdWJ1SXRwaGlZRgo1NGJHNVJTVm9GNXhOZGQzcnBlR3RWM25BY0hBRGpISGYrT2hZM2NQN1BYeUlwV3lXem82d0Z6WHZVQ0ZGaldUCnVNb2dtQy9lNDFsNE5BQ3Yxbk82dU9hRFhsZnFKYWRlVkFEclpURTdFZlF4eGtzY2UvQVZIVEdKUkZpV3o2U0cKaHNuUm5mUWxsNi9KTGlqSTNqZ1hmaDZTTmxQaFpNOXdoOU5yeFFJREFRQUJBb0lCQUZBRkNWZnhLc05RcC9xVApOUWZSWmZpYUg5bjJLc2FMTndhMUVyQmxYSUVVK2xpYU5EaEg2OUhyZVp0bnpRbklWK2VPS1FjWnYwUG4rM1kxCndQOS9pK0J5Nm90T09TcVlRS2tvSnB3WUVWejA4N2dwTUxJNFEzQlBrWmxnUURkZWpjbmh2RTJFREpSb0NER1AKL0YzVTN2RmZRLzc4K3A5aEticzR4bzFVMU4xcGxSM0pXa1Nab1RCc3lJZEkvY1JqOHc4YjlZS2JvQXJEald0VApPV3VTT2dqbGc5L0hNaWVBdFltc0orQlZ4eGxoYkdwMmRJc2RPR1oySGJiMUhxL2tWUHlIYnY5aGNIM0dzQXQzClhzLytMNHU0Z3NaZFBrZ282Y29uZWdQZkVnQ2lhTHl6NjFoRldHSU4rWXdYeEdMb3dVQUppdWxldEpPNk9nYWkKa1E0cE5BRUNnWUVBeEljSVB4NWJGV0t5UmRJUHF3Q25uRDk2SERtR2lBSUczaldmQWJCeVZkbXZMa2NGK0wrNwpHOXJ6VWRPMmVTSkJyZTFKSkU3RzBoMGtrT0twQ09SRkFSeExONUxlc0owaTAzV3YySVpuMFU0enl2d2tMR3dsCk5hY3cySUlVZytTcUZNbUkvYzY4WExnOXVmLzExVzBnckVhOWVxMWNOdzBTOGZXQU1yNVM1UUVDZ1lFQTFPSm8KalZhRnVZVEMrckNvYzlGZTdqN0pBWG1ybFd1a0lNYWtYYVZLWmYzY2pjUmpYYU5DZzdmZ2RtQUZFQTM5MEIzSgpucmZPY3crN0FiSEFCTEFhM2VTakFlUytvTHRzQ28xSXRld2hFRDcxV0NrbWtTajZyN0FZUnBkakZVaXJuZkgvCmpDbnlQMEJXY1VHaW94M3NtRTRvRXhSSlZVY3VFK1QrVlRaUE1zVUNnWUJuWjdGVUhMSEtMOU1qWDZLMDFEUmMKeHgyR0NsS1o3NDhUamwwcXMxK0Znbk5sUXlPR1BjUENwVkxQcERqbnhibVBQVUgxNDZsRFZ1Z3RmOFYzRXp5bgpsKzZQd0N5Qit6d2V6VmFIOURoZDdlcmNqQXl3ZnZxWkgyNVpEU0NrUVVXb2lGNFhSWmJncG1SOElJeUdEdFJaCk1TTnprQkhzT1duVmU5cEhXTk1mQVFLQmdCbTBNSjJJS2VOSmhXVlJlVFZxdlVTVTdoNHdlNmd4OUFKRkdjbDQKMEsvWjVBeDEvdTFxYmZpTGRoMjA0RWVjK04xdEMxQVByMnl6SkloQzF3VjU0ZlNTeTUvSjU3NXVndEFQV1EwbQpNcVBNaGtFYnp0MU9EbDZ5SzJ3eWtUTWJzQ1VFK0M4TFRZV1hsaGhjclc5MmlIK25TQzF0a0tFc1ZBTHc3a2lKCm95eGxBb0dBU0hNQW9uWHl3ZGdFUFJUNTdnL2hCeDczYjRnRWFMLytKaHkraTJjVTkveld2VmpUdVZtZGIyL00Kc0NDWG9kWEZoRTZ3aTZwSkJweGVZTUQwWkJMdFlLNDM2dDkweFcyTU91cFlvZ0doUGRDdnpEWnZoU0lEdnpycAp2SXRsVm9zTm92Ri8zanljRUZWemdPdVFtN0N3KzZOQU5VZXFpaFlQN1I1a1ZUczN1Q0U9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== + server.pem: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMRENDQXhTZ0F3SUJBZ0lSQUliOGFrcURUc21UOWxmbnZ4S1A5OTB3RFFZSktvWklodmNOQVFFTEJRQXcKUlRFVk1CTUdBMVVFQ2hNTVkyVnlkQzF0WVc1aFoyVnlNU3d3S2dZRFZRUURFeU52WW5ObGNuWmhZbWxzYVhSNQpMWE5sY25abGNpMWpZUzFqWlhKMGFXWnBZMkYwWlRBZUZ3MHlNVEF5TWpVeU1EUTVNRGxhRncweU1qQXlNalV5Ck1EUTVNRGxhTUVJeEZUQVRCZ05WQkFvVERHTmxjblF0YldGdVlXZGxjakVwTUNjR0ExVUVBeE1nYjJKelpYSjIKWVdKcGJHbDBlUzF6WlhKMlpYSXRZMlZ5ZEdsbWFXTmhkR1V3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQgpEd0F3Z2dFS0FvSUJBUUNqYmFKTTZRRnVqMUxnRnhuQXFOdFBYendZK0grdXZPbEJMLzlueUtRamw4WDNUdjZ3CkFmS0lQVXc0RkV5UlZiZ1dVb3NJQ2pMVTlDWGRQZzJXbGVYcG1za1RGTUVKby9wYzZvT09Xbjcza2VsZUsxdTMKV0lMWE42MHFld3ZvYUdRVHY4L2ZNck1qTGRaYVF5d3dDVXJYZjVGeURQUHVzOUFTaDAxSUs1dTRpMm1HSmdYbgpoc2JsRkpXZ1huRTExM2V1bDRhMVhlY0J3Y0FPTWNkLzQ2Rmpkdy9zOWZJaWxiSmJPanJBWE5lOVFJVVdOWk80CnlpQ1lMOTdqV1hnMEFLL1djN3E0NW9OZVYrb2xwMTVVQU90bE1Uc1I5REhHU3h4NzhCVWRNWWxFV0piUHBJYUcKeWRHZDlDV1hyOGt1S01qZU9CZCtIcEkyVStGa3ozQ0gwMnZGQWdNQkFBR2pnZ0VZTUlJQkZEQU9CZ05WSFE4QgpBZjhFQkFNQ0JhQXdEQVlEVlIwVEFRSC9CQUl3QURDQjh3WURWUjBSQklIck1JSG9naUJ2WW5ObGNuWmhZbWxzCmFYUjVMWE5sY25abGNpMWpaWEowYVdacFkyRjBaWUpsYjJKelpYSjJZV0pwYkdsMGVTMXZZbk5sY25aaGRHOXkKYVhWdExXOWljMlZ5ZG1GMGIzSnBkVzB0WVhCcExtOXdaVzR0WTJ4MWMzUmxjaTF0WVc1aFoyVnRaVzUwTFc5aQpjMlZ5ZG1GaWFXeHBkSGt1YzNaakxtTnNkWE4wWlhJdWJHOWpZV3lDWFc5aWMyVnlkbUYwYjNKcGRXMHRZWEJwCkxXOXdaVzR0WTJ4MWMzUmxjaTF0WVc1aFoyVnRaVzUwTFc5aWMyVnlkbUZpYVd4cGRIa3VZWEJ3Y3k1dFlYSmoKYnk1a1pYWXdOUzV5WldRdFkyaGxjM1JsY21acFpXeGtMbU52YlRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQQpNd0JlandBTU4yUzZ3QlpRMGxrVWs4L2NGampjQUdwNFE0Nm45Sjh1WmFZclpuU3VwK2l4N2JHOWRIMUpjQmZHCnJhRW5Ua08rekQwdU0wOXNLeVhNK0pSckl1Y3RjaXBaYU11QlE4SUZScGlzWW9JUXZkMmZnOFo0bnY5aG1sK1YKMm0yNXBPemYwT0MrNXR3WllwYU5rVWhRU25lTEdqb2diZENPVzNJSHduU0V3TmJza0Z4WlVTcERFWnlQODB5NQpNNFhKeWMyc1k5b2lLVEQrL2NXeGZCNlpYYStEQnU3Z1huQWpXbWpHNndBNVZveUV4OHNEOXBDWCtHNzBKV2NxCk9WelRXQXBtVURHdzhvQTFrb3hrdktidjZDWTRUNnpYdC9VMzhxZDdaNkhibExxM2dYak1Rd2JLS0FiZUFFSVQKWEhoajdkQlBIUVV2aU5LUFp0UXEzdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + ca.pem: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWsrZ0F3SUJBZ0lRQitHV3ArMUdaeSsxeGZEL3o2NlRBREFOQmdrcWhraUc5dzBCQVFzRkFEQkYKTVJVd0V3WURWUVFLRXd4alpYSjBMVzFoYm1GblpYSXhMREFxQmdOVkJBTVRJMjlpYzJWeWRtRmlhV3hwZEhrdApZMnhwWlc1MExXTmhMV05sY25ScFptbGpZWFJsTUI0WERUSXhNREl5TlRJd05Ea3dNMW9YRFRJMk1ESXlOREl3Ck5Ea3dNMW93UlRFVk1CTUdBMVVFQ2hNTVkyVnlkQzF0WVc1aFoyVnlNU3d3S2dZRFZRUURFeU52WW5ObGNuWmgKWW1sc2FYUjVMV05zYVdWdWRDMWpZUzFqWlhKMGFXWnBZMkYwWlRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUxWTG0yc3M3OXZpZlA2MXJ3bXFMSDh0WWViTlhCckFiQjJPRGs3dWJZdUlua3BSCmI0azFHckFWVVN0cGJ0djF0NTNZVVJ0SElZU2dva1RMdjRRRXpBSDd5U3pWamM3VXg4N2JjdjExV2NWQllRSDEKV0VRL0s3emVUa1huQ096SDRwN3c1cDBZbU1hQVJkOGZlOElMQ3dIbUZGNHR1VHdmL3hKL1kzTEFZNWplWUNNeApHUXRENjRPQzY0UGNOR3dCNDJTRGU3RXQ1R0dFQmFwaXF5YTFzQ1RhQk14Zmp3VzUrU0pScXVKQjhnNlFPNGlFCmtQYlAraTNEN3pETWFCQmNkbGtzQkc1dWxaZlB3NGlGUU5rdncvelhpVFkrUjN2dVAzY0ZMMENDSlo3ei9GRnUKYjRmaGM0UVdUQ2dHbE9XSjNSOEpPUHBKWGdJWXpQSUU4K1VBSURzQ0F3RUFBYU5UTUZFd0RnWURWUjBQQVFILwpCQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0xnWURWUjBSQkNjd0pZSWpiMkp6WlhKMllXSnBiR2wwCmVTMWpiR2xsYm5RdFkyRXRZMlZ5ZEdsbWFXTmhkR1V3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQURrYmRrVTQKWkI5bkx5ZzJtTW5PaU5tWkEzT0drUzBySXYyYzVOU2ZEQ0M0NzNGK3F6NDhSc2psODZuWDl5a05PVTVsR2J2cwpCZWFJV3hOWDhCbGNpTy81ajNCZlUvQW9ha05DZGZDVk1CR3c3QkZmQUZuZHBNVnVYN1daSCtmaWt4YWlVNVNrCmdJdkN5T0ZJTUV5Y1lTeGtPQTVoci9ka3dNQktDSkEzdXlrbUt1aFdua2pXZDhXVjB3QVpMeHpMQk82R3NlTzQKeFBDbkE3c0JsaFpXOStQVjRpY0k3b0tCQTlQZzhpWnJtU2N3Y0RmSTNKYzV3bmNBRnRwR3JGSnJQSFQ5NWFFVQpaQitPREdvOXExZUhlS2w5QlNqallLeDJwTURuYUF5THdqZFhPU0R0ZTVpL1RwekhtVGEyL0l0RzF5WHdnMHl6CkNyWWhJcmtmY21RWWtzWT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +kind: Secret +metadata: + name: observatorium-certs + namespace: open-cluster-management-observability +type: Opaque diff --git a/collectors/metrics/test/integration/manifests/rolebinding.yaml b/collectors/metrics/test/integration/manifests/rolebinding.yaml new file mode 100644 index 000000000..235848a92 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/rolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: test-client-view +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-monitoring-view +subjects: +- kind: ServiceAccount + name: default + namespace: open-cluster-management-observability \ No newline at end of file diff --git a/collectors/metrics/test/integration/manifests/thanos-api.yaml b/collectors/metrics/test/integration/manifests/thanos-api.yaml new file mode 100644 index 000000000..398058cfe --- /dev/null +++ b/collectors/metrics/test/integration/manifests/thanos-api.yaml @@ -0,0 +1,167 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + generation: 1 + labels: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: 2.3.0-SNAPSHOT-2021-07-26-18-43-26 + controller.receive.thanos.io: thanos-receive-controller + controller.receive.thanos.io/hashring: default + name: observability-observatorium-thanos-receive-default + namespace: open-cluster-management-observability +spec: + podManagementPolicy: OrderedReady + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + 
controller.receive.thanos.io/hashring: default + serviceName: observability-observatorium-thanos-receive-default + template: + metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: 2.3.0-SNAPSHOT-2021-07-26-18-43-26 + controller.receive.thanos.io/hashring: default + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - thanos-receive + - key: app.kubernetes.io/instance + operator: In + values: + - observability-observatorium + namespaces: + - open-cluster-management-observability + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - thanos-receive + - key: app.kubernetes.io/instance + operator: In + values: + - observability-observatorium + namespaces: + - open-cluster-management-observability + topologyKey: topology.kubernetes.io/zone + weight: 100 + containers: + - args: + - receive + - --log.level=info + - --grpc-address=0.0.0.0:10901 + - --http-address=0.0.0.0:10902 + - --remote-write.address=0.0.0.0:19291 + - --receive.replication-factor=1 + - --tsdb.path=/var/thanos/receive + - --label=replica="$(NAME)" + - --label=receive="true" + - --receive.local-endpoint=0.0.0.0:10901 + - --tsdb.retention=4d + - --receive.hashrings-file=/var/lib/thanos-receive/hashrings.json + env: + - name: NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: DEBUG + image: quay.io/stolostron/thanos:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 8 + httpGet: + path: /-/healthy + port: 10902 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 1 + name: thanos-receive + ports: + - containerPort: 10901 + name: grpc + protocol: TCP + - containerPort: 10902 + name: http + protocol: TCP + - containerPort: 19291 + name: remote-write + protocol: TCP + readinessProbe: + failureThreshold: 20 + httpGet: + path: /-/ready + port: 10902 + scheme: HTTP + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /var/thanos/receive + name: data + - mountPath: /var/lib/thanos-receive + name: hashring-config + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + defaultMode: 420 + name: observability-observatorium-thanos-receive-controller-tenants-generated + name: hashring-config + updateStrategy: + rollingUpdate: + partition: 0 + type: RollingUpdate + volumeClaimTemplates: + - metadata: + creationTimestamp: null + labels: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + controller.receive.thanos.io/hashring: default + name: data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: gp2 + 
volumeMode: Filesystem \ No newline at end of file diff --git a/collectors/metrics/test/integration/manifests/thanos-configmap.yaml b/collectors/metrics/test/integration/manifests/thanos-configmap.yaml new file mode 100644 index 000000000..c2c657375 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/thanos-configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + hashrings.json: '[{"hashring":"default","endpoints":["0.0.0.0:10901"]}]' +kind: ConfigMap +metadata: + name: observability-observatorium-thanos-receive-controller-tenants-generated + namespace: open-cluster-management-observability \ No newline at end of file diff --git a/collectors/metrics/test/integration/manifests/thanos-pvc.yaml b/collectors/metrics/test/integration/manifests/thanos-pvc.yaml new file mode 100644 index 000000000..ff8118b83 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/thanos-pvc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + controller.receive.thanos.io/hashring: default + name: data-observability-observatorium-thanos-receive-default-0 + namespace: open-cluster-management-observability +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + volumeMode: Filesystem diff --git a/collectors/metrics/test/integration/manifests/thanos-service.yaml b/collectors/metrics/test/integration/manifests/thanos-service.yaml new file mode 100644 index 000000000..19b8747b5 --- /dev/null +++ b/collectors/metrics/test/integration/manifests/thanos-service.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + app.kubernetes.io/version: 2.3.0-SNAPSHOT-2021-07-26-18-43-26 + controller.receive.thanos.io/hashring: default + name: observability-observatorium-thanos-receive-default + namespace: open-cluster-management-observability +spec: + clusterIP: None + ports: + - name: grpc + port: 10901 + protocol: TCP + targetPort: 10901 + - name: http + port: 10902 + protocol: TCP + targetPort: 10902 + - name: remote-write + port: 19291 + protocol: TCP + targetPort: 19291 + selector: + app.kubernetes.io/component: database-write-hashring + app.kubernetes.io/instance: observability-observatorium + app.kubernetes.io/name: thanos-receive + app.kubernetes.io/part-of: observatorium + controller.receive.thanos.io/hashring: default + sessionAffinity: None + type: ClusterIP + diff --git a/collectors/metrics/test/integration/prereq.sh b/collectors/metrics/test/integration/prereq.sh new file mode 100755 index 000000000..b1202ab86 --- /dev/null +++ b/collectors/metrics/test/integration/prereq.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +WORKDIR=`pwd` + +setup_kubectl_command() { + command -v kubectl + if [ $? 
-ne 0 ]; then + echo "=====Setup kubectl=====" + # kubectl required for kind + echo "Install kubectl from openshift mirror (https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.14/openshift-client-mac-4.4.14.tar.gz)" + mv README.md README.md.tmp + if [[ "$(uname)" == "Darwin" ]]; then # then we are on a Mac + curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.14/openshift-client-mac-4.4.14.tar.gz + tar xzvf openshift-client-mac-4.4.14.tar.gz # xzf to quiet logs + rm openshift-client-mac-4.4.14.tar.gz + elif [[ "$(uname)" == "Linux" ]]; then # we are in travis, building in rhel + curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.14/openshift-client-linux-4.4.14.tar.gz + tar xzvf openshift-client-linux-4.4.14.tar.gz # xzf to quiet logs + rm openshift-client-linux-4.4.14.tar.gz + fi + # this package has a binary, so: + + echo "Current directory" + echo $(pwd) + mv README.md.tmp README.md + chmod +x ./kubectl + sudo cp ./kubectl /usr/local/bin/kubectl + fi + # kubectl are now installed in current dir + echo -n "kubectl version" && kubectl version +} + +install_kind() { + command -v kind + if [ $? -ne 0 ]; then + echo "Install kind from (https://kind.sigs.k8s.io/)." + + # uname returns your operating system name + # uname -- Print operating system name + # -L location, lowercase -o specify output name, uppercase -O Write output to a local file named like the remote file we get + curl -Lo ./kind "https://kind.sigs.k8s.io/dl/v0.7.0/kind-$(uname)-amd64" + chmod +x ./kind + sudo cp ./kind /usr/local/bin/kind + fi +} + + +install_kind +setup_kubectl_command \ No newline at end of file diff --git a/collectors/metrics/test/integration/setup.sh b/collectors/metrics/test/integration/setup.sh new file mode 100755 index 000000000..1834dd771 --- /dev/null +++ b/collectors/metrics/test/integration/setup.sh @@ -0,0 +1,182 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +if [ "$#" -ne 1 ] ; then + echo "Usage: $0 IMAGE" >&2 + exit 1 +fi + +echo "=====running kind exploration=====" + +IMAGE_NAME=$1 +echo "IMAGE: " $IMAGE_NAME + +DEFAULT_NS="open-cluster-management" +HUB_KUBECONFIG=$HOME/.kube/kind-config-hub +WORKDIR=`pwd` + +sed_command='sed -i-e -e' +if [[ "$(uname)" == "Darwin" ]]; then + sed_command='sed -i '-e' -e' +fi + +deploy() { + #setup_kubectl_command + create_kind_hub + deploy_prometheus_operator + deploy_observatorium + deploy_thanos + deploy_metrics_collector $IMAGE_NAME +} + +setup_kubectl_command() { + echo "=====Setup kubectl=====" + # kubectl required for kind + echo "Install kubectl from openshift mirror (https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.14/openshift-client-mac-4.4.14.tar.gz)" + mv README.md README.md.tmp + if [[ "$(uname)" == "Darwin" ]]; then # then we are on a Mac + curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.14/openshift-client-mac-4.4.14.tar.gz + tar xzvf openshift-client-mac-4.4.14.tar.gz # xzf to quiet logs + rm openshift-client-mac-4.4.14.tar.gz + elif [[ "$(uname)" == "Linux" ]]; then # we are in travis, building in rhel + curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.4.14/openshift-client-linux-4.4.14.tar.gz + tar xzvf openshift-client-linux-4.4.14.tar.gz # xzf to quiet logs + rm openshift-client-linux-4.4.14.tar.gz + fi + # this package has a binary, so: + + echo "Current directory" + echo $(pwd) + mv README.md.tmp README.md + chmod +x ./kubectl + if [[ ! 
-f /usr/local/bin/kubectl ]]; then + sudo cp ./kubectl /usr/local/bin/kubectl + fi + # kubectl are now installed in current dir + echo -n "kubectl version" && kubectl version +} + +create_kind_hub() { + WORKDIR=`pwd` + echo "Delete hub if it exists" + kind delete cluster --name hub || true + + echo "Start hub cluster" + rm -rf $HOME/.kube/kind-config-hub + kind create cluster --kubeconfig $HOME/.kube/kind-config-hub --name hub --config ${WORKDIR}/test/integration/kind/kind-hub.config.yaml + # kubectl cluster-info --context kind-hub --kubeconfig $(pwd)/.kube/kind-config-hub # confirm connection + export KUBECONFIG=$HOME/.kube/kind-config-hub +} +deploy_observatorium() { + echo "=====Setting up observatorium in kind cluster=====" + echo "Current directory" + echo $(pwd) + + echo -n "Create namespace open-cluster-management-observability: " && kubectl create namespace open-cluster-management-observability + echo "Apply observatorium yamls" + echo -n "Apply client ca cert and server certs: " && kubectl apply -f ./metrics-collector/test/integration/manifests/observatorium-ca-cert.yaml + echo -n "Apply secret with tenant yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/observatorium-api-secret.yaml + echo -n "Apply configmap with rbac yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/observatorium-api-configmap.yaml + echo -n "Apply Deployment yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/observatorium-api.yaml + echo -n "Apply Service yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/observatorium-api-service.yaml +} +deploy_thanos() { + echo "=====Setting up thanos in kind cluster=====" + echo -n "Apply create pvc yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/thanos-pvc.yaml + echo -n "Apply configmap with hashring yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/thanos-configmap.yaml + echo -n "Apply Deployment yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/thanos-api.yaml + echo -n "Apply Service yaml : " && kubectl apply -f ./metrics-collector/test/integration/manifests/thanos-service.yaml + echo "Waiting 2 minutes for observatorium and thanos to start... " && sleep 120 +} + +deploy_prometheus_operator() { + echo "=====Setting up prometheus in kind cluster=====" + + WORKDIR=`pwd` + echo "Install prometheus operator." + echo "Current directory" + echo $(pwd) + cd ${WORKDIR}/.. + git clone https://github.com/coreos/kube-prometheus.git + echo "Replace namespace with openshift-monitoring" + $sed_command "s~namespace: monitoring~namespace: openshift-monitoring~g" kube-prometheus/manifests/*.yaml + $sed_command "s~namespace: monitoring~namespace: openshift-monitoring~g" kube-prometheus/manifests/setup/*.yaml + $sed_command "s~name: monitoring~name: openshift-monitoring~g" kube-prometheus/manifests/setup/*.yaml + $sed_command "s~replicas:.*$~replicas: 1~g" kube-prometheus/manifests/prometheus-prometheus.yaml + echo "Remove alertmanager and grafana to free up resource" + rm -rf kube-prometheus/manifests/alertmanager-*.yaml + rm -rf kube-prometheus/manifests/grafana-*.yaml + if [[ ! 
-z "$1" ]]; then + update_prometheus_remote_write $1 + else + update_prometheus_remote_write + fi + + echo "HUB_KUBECONFIG" ${HUB_KUBECONFIG} + echo "KUBECONFIG" ${KUBECONFIG} + + echo "Creating prometheus manifests setup" && kubectl create -f kube-prometheus/manifests/setup + until kubectl get servicemonitors --all-namespaces; do date; sleep 1; echo ""; done + echo "Creating prometheus manifests" && kubectl create -f kube-prometheus/manifests/ + rm -rf kube-prometheus + echo "Installed prometheus operator." + sleep 60 + echo -n "available services: " && kubectl get svc --all-namespaces +} + +deploy_metrics_collector() { + echo "=====Deploying metrics-collector=====" + echo -n "Switch to namespace: " && kubectl config set-context --current --namespace open-cluster-management-observability + + echo "Current directory" + echo $(pwd) + # git clone https://github.com/stolostron/multicluster-observability-operator/collectors/metrics.git + + cd metrics-collector + echo -n "Creating pull secret: " && kubectl create secret docker-registry multiclusterhub-operator-pull-secret --docker-server=quay.io --docker-username=$DOCKER_USER --docker-password=$DOCKER_PASS + + # apply yamls + echo "Apply hub yamls" + echo -n "Apply client-serving-certs-ca-bundle: " && kubectl apply -f ./test/integration/manifests/client-serving-certs-ca-bundle.yaml + echo -n "Apply rolebinding: " && kubectl apply -f ./test/integration/manifests/rolebinding.yaml + echo -n "Apply client secret: " && kubectl apply -f ./test/integration/manifests/client_secret.yaml + echo -n "Apply mtls certs: " && kubectl apply -f ./test/integration/manifests/metrics-collector-cert.yaml + cp ./test/integration/manifests/deployment.yaml ./test/integration/manifests/deployment_update.yaml + $sed_command "s~{{ METRICS_COLLECTOR_IMAGE }}~$1~g" ./test/integration/manifests/deployment_update.yaml + $sed_command "s~cluster=func_e2e_test_travis~cluster=func_e2e_test_travis-$1~g" ./test/integration/manifests/deployment_update.yaml + echo "Display deployment yaml" + cat ./test/integration/manifests/deployment_update.yaml + echo -n "Apply metrics collector deployment: " && kubectl apply -f ./test/integration/manifests/deployment_update.yaml + rm ./test/integration/manifests/deployment_update.yaml* + + echo -n "available pods: " && kubectl get pods --all-namespaces + echo "Waiting 3 minutes for the pod to set up and send data... 
" && sleep 180 + POD=$(kubectl get pod -l k8s-app=metrics-collector -n open-cluster-management-observability -o jsonpath="{.items[0].metadata.name}") + echo "Monitoring pod logs" + count=0 + + while true ; do + count=`expr $count + 1` + result=$(kubectl logs $POD | grep -i "Metrics pushed successfully" > /dev/null && echo "SUCCESS" || echo "FAILURE") + if [ $result == "SUCCESS" ] + then + echo "SUCCESS sending metrics to Thanos" + exit 0 + fi + echo "No Sucess yet ..Sleeping for 30s" + echo "available pods: " && kubectl describe pod $POD + sleep 30s + if [ $count -gt 10 ] + then + echo "FAILED sending metrics to Thanos" + exit 1 + fi + + done + echo "available pods: " && kubectl get pods --all-namespaces + +} + + +deploy diff --git a/collectors/metrics/testdata/service-ca.crt b/collectors/metrics/testdata/service-ca.crt new file mode 100644 index 000000000..47f453795 --- /dev/null +++ b/collectors/metrics/testdata/service-ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDUTCCAjmgAwIBAgIIOHGP1ZF18cIwDQYJKoZIhvcNAQELBQAwNjE0MDIGA1UE +Awwrb3BlbnNoaWZ0LXNlcnZpY2Utc2VydmluZy1zaWduZXJAMTYzMzkxOTY0MTAe +Fw0yMTEwMTEwMjM0MDBaFw0yMzEyMTAwMjM0MDFaMDYxNDAyBgNVBAMMK29wZW5z +aGlmdC1zZXJ2aWNlLXNlcnZpbmctc2lnbmVyQDE2MzM5MTk2NDEwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhJku2cPp5qE7lYBemurqO/K43d0fBA0wR +P77k1HjBRaqvEMbr+veusyo71iGf+SVFxkW3dksNogsR6lePd1VXf+659ZSEGqQl +gD8QQFTrytELX05xz4yH/rape41WLr/U1fpdRiDcA5xmPYkznR3MwWtnJYj7kVdP +0rlLSNyn2XfSlh058Genn9HoDlUtYpKmtOloCzGsDKj+W4/FL4w7Ycaw3MSegSeL +ZaoNFKDJFvqs41VxYnyfxuSQw4Im31oma5DgiFlKkwABhr6dA/ciUilAeBC0WWf7 +4B7za4557XmCTCF9wCnBguwtKQgwSOIaSkCATZLgWwUgvqyFc1D5AgMBAAGjYzBh +MA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQCePMN +BoKWJSACjOJV8zyxvFCUkDAfBgNVHSMEGDAWgBQCePMNBoKWJSACjOJV8zyxvFCU +kDANBgkqhkiG9w0BAQsFAAOCAQEASlgYzir8CfBOvoTS5eEuFnYNZtusmj+titRs +7nGLqDmbpAqXYxq7HG0sOEz7OMNo/Wze/8LQWCL3sFHIY6vuErbsns2hCw2EI0U5 +cDU7Ph06Mk6IAD5t3YrfkxMOxly1ASNNYijsE7pMkszNu1ZhLQ4qQbxigA6Uu/AQ +wkguGKPsv4pQYHbqSfG8FoThXfgOQk/mhBYfiL0dwyt31uVPqcNVc1uQ8XWblfE8 +Jjd+i+pHAbCR8MmQQK8aleIzb6y0OXBD9EQ3PP3rWK1a8xbabnjILi8A1eCWmMx7 +NXRxsbkJr9EVg6En5b7gNbs+pXa5FLYFV7i5px9d4MtnpG7+2w== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/collectors/metrics/testdata/timeseries.txt b/collectors/metrics/testdata/timeseries.txt new file mode 100644 index 000000000..16888dac1 --- /dev/null +++ b/collectors/metrics/testdata/timeseries.txt @@ -0,0 +1,10 @@ +# TYPE instance:node_num_cpu:sum untyped +instance:node_num_cpu:sum{container="kube-rbac-proxy",endpoint="https",instance="ip-10-0-132-126.ec2.internal",job="node-exporter",namespace="openshift-monitoring",pod="node-exporter-7mpbv",service="node-exporter",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 16 1624345682542 +# TYPE instance:node_vmstat_pgmajfault:rate1m untyped +instance:node_vmstat_pgmajfault:rate1m{container="kube-rbac-proxy",endpoint="https",instance="ip-10-0-132-126.ec2.internal",job="node-exporter",namespace="openshift-monitoring",pod="node-exporter-7mpbv",service="node-exporter",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 0 1624345682542 +# TYPE instance_device:node_disk_io_time_seconds:rate1m untyped 
+instance_device:node_disk_io_time_seconds:rate1m{container="kube-rbac-proxy",device="nvme0n1",endpoint="https",instance="ip-10-0-132-126.ec2.internal",job="node-exporter",namespace="openshift-monitoring",pod="node-exporter-7mpbv",service="node-exporter",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 0.017999999999998788 1624345682542 +# TYPE instance_device:node_disk_io_time_weighted_seconds:rate1m untyped +instance_device:node_disk_io_time_weighted_seconds:rate1m{container="kube-rbac-proxy",device="nvme0n1",endpoint="https",instance="ip-10-0-132-126.ec2.internal",job="node-exporter",namespace="openshift-monitoring",pod="node-exporter-7mpbv",service="node-exporter",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 0.05104444444444501 1624345682542 +# TYPE kube_daemonset_status_desired_number_scheduled untyped +kube_daemonset_status_desired_number_scheduled{container="kube-rbac-proxy-main",daemonset="machine-api-termination-handler",endpoint="https-main",job="kube-state-metrics",namespace="openshift-machine-api",service="kube-state-metrics",instance="",prometheus="openshift-monitoring/k8s",prometheus_replica="prometheus-k8s-0"} 0 1624345642196 diff --git a/collectors/metrics/testdata/tls/ca.crt b/collectors/metrics/testdata/tls/ca.crt new file mode 100644 index 000000000..31839ec10 --- /dev/null +++ b/collectors/metrics/testdata/tls/ca.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaDCCAlCgAwIBAgIRAL7dagm3GMjnRKpG1p85ZpEwDQYJKoZIhvcNAQELBQAw +RTEVMBMGA1UEChMMY2VydC1tYW5hZ2VyMSwwKgYDVQQDEyNvYnNlcnZhYmlsaXR5 +LXNlcnZlci1jYS1jZXJ0aWZpY2F0ZTAeFw0yMDA5MDQyMDQ5NDNaFw0yMDEyMDMy +MDQ5NDNaMEUxFTATBgNVBAoTDGNlcnQtbWFuYWdlcjEsMCoGA1UEAxMjb2JzZXJ2 +YWJpbGl0eS1zZXJ2ZXItY2EtY2VydGlmaWNhdGUwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCvC08mBi7y342K3jlmG8E3L8wrmprIdkWGtoWwAzJgrli4 +ZIDpw8VqY+R7O6SaHHkdkl8FCacvPTIVKRgV20rDAiQ+PbvF3oSAqZvT3REE0PPF +wo2arRT5gWhoGyWz+evWYvGZ7b/b8fEBJmVbg0iNiqX3NnXW6ICjlZOq5TxQmjs1 +f3rx8eGo27tGyJ/KuJBl2Ilc686FOxIwWjBWLnw9WLJFKqx6w0BU3p/kYXWgO2il +HtBSCtvN6K3387KiHMnHf5PAPl2pp0X78yZirG591w3Ku5jCkBDJNaLPkfkpZ9mn +lhIqFtOFUjobAPFp/KhR0II52JAlPziwfxIn6fVPAgMBAAGjUzBRMA4GA1UdDwEB +/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MC4GA1UdEQQnMCWCI29ic2VydmFiaWxp +dHktc2VydmVyLWNhLWNlcnRpZmljYXRlMA0GCSqGSIb3DQEBCwUAA4IBAQAxD54R +lA0cmlYKOJNsaS9XOBbAtKL18Yo+URq6R1ua4G8ESLIvmMxiv+cPt2jgBg2M+4Al +/CN3xaoUjDDNCiuHvJNiHGHk4Jue96YYG2ZJfrs2c9addevQYlq/7AH4dhksaIEH +hSZ32utRjSOo56X1/j6wsIS4HJ4uvnUAaMc4kMqkP6seK6J0cTDqr3hC9G6RqOdE +7fXZnRVQr+nwdq86OWPNm7wSeSiEeiIOPdjevgBEwmgGT9sBEefsKOICXRKohzOW +CBLQY+tFjiNa/uCnEFhxVsUW28YO2ISmXi+v3LAynFBL8ggBtqRnDzEEezKZnPZU +0LXATouzW5VATjv2 +-----END CERTIFICATE----- diff --git a/collectors/metrics/testdata/tls/tls.crt b/collectors/metrics/testdata/tls/tls.crt new file mode 100644 index 000000000..6b83bb7ec --- /dev/null +++ b/collectors/metrics/testdata/tls/tls.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIRAPKAy+Ft9wZMCgVso9IWl5cwDQYJKoZIhvcNAQELBQAw +RTEVMBMGA1UEChMMY2VydC1tYW5hZ2VyMSwwKgYDVQQDEyNvYnNlcnZhYmlsaXR5 +LWNsaWVudC1jYS1jZXJ0aWZpY2F0ZTAeFw0yMDA5MDQyMDU4NTZaFw0yMDEyMDMy +MDU4NTZaMFkxFTATBgNVBAoTDGNlcnQtbWFuYWdlcjEMMAoGA1UECxMDYWNtMTIw +MAYDVQQDEylvYnNlcnZhYmlsaXR5LW1hbmFnZWQtY2x1c3Rlci1jZXJ0aWZpY2F0 +ZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMllqKIBEMmBx+ssje6G +CKoYTOcK28E4V2bItAx9+/1x7iyLD+8dQ3DpOhLEbkVLRxUz6WHpfjKZtvYWmHak +MmpjA0g9G+gqbvb2I75tyWwiRWBK2xIKB+7ciW2JIH/PF/eXZuZpUPfEskL8PjZA +SIvGVFkwSRkpAfQUBxtUr074ncbPHw7+giqSHDUraCysCjCzYqS/5S0x/3Vtv94R 
+gY9FeIGb9F33C7z6jkAz6xIiE5DZBu0urDhuj5icWfWc9wRJSeYZCn2HWmgGG42S +zAHRQcLobUf3+l2yBeWn7Hoh22KvUWywnJ1rJyS3XsN4pifPqvNwUT3I+t4iXy+9 +9isCAwEAAaNWMFQwDgYDVR0PAQH/BAQDAgWgMAwGA1UdEwEB/wQCMAAwNAYDVR0R +BC0wK4Ipb2JzZXJ2YWJpbGl0eS1tYW5hZ2VkLWNsdXN0ZXItY2VydGlmaWNhdGUw +DQYJKoZIhvcNAQELBQADggEBAHuMZZ+OSlC9OWkXYCe7VE+i8M59R4w83XZOROFl +jOsDtY1U66+wJKGqwNaHXJjsw8dJp6M37n0vlIRiSw395u8j2ABo6U6beWJ/ipuc +/D54r+37sB7qrB/tEbkSSY6RI5ZCQzgxFddGbrftTI7quls0xmxs0uyLfL8SPPWC +jaXor5KJlf+UY2FQVGgSQ3mmZMXkO128bWbxmGh6itEcZxzYpMSFQI3d3oEDSf31 +ePCOCPSEmrVfTjrQ7p6YSS9rHHAKnlA78636BdKjhkdFdc5eW/IsZlRzrMSWDvfq +0tOOwe+IlyKhCfE+EN1ATga6FHo4JSXTEzAIo4L5/N/dnwk= +-----END CERTIFICATE----- diff --git a/collectors/metrics/testdata/tls/tls.key b/collectors/metrics/testdata/tls/tls.key new file mode 100644 index 000000000..11265f48a --- /dev/null +++ b/collectors/metrics/testdata/tls/tls.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAyWWoogEQyYHH6yyN7oYIqhhM5wrbwThXZsi0DH37/XHuLIsP +7x1DcOk6EsRuRUtHFTPpYel+Mpm29haYdqQyamMDSD0b6Cpu9vYjvm3JbCJFYErb +EgoH7tyJbYkgf88X95dm5mlQ98SyQvw+NkBIi8ZUWTBJGSkB9BQHG1SvTvidxs8f +Dv6CKpIcNStoLKwKMLNipL/lLTH/dW2/3hGBj0V4gZv0XfcLvPqOQDPrEiITkNkG +7S6sOG6PmJxZ9Zz3BElJ5hkKfYdaaAYbjZLMAdFBwuhtR/f6XbIF5afseiHbYq9R +bLCcnWsnJLdew3imJ8+q83BRPcj63iJfL732KwIDAQABAoIBAE+kyRxP6ZCWpzXE +Z9iE3tZNR9QyghcWLdIy2qj6BXYD4RlyYPnBi9MSGGoljioPC+xBnDChQsvF6AnH +g8GfS2SxBTJDIE8ewMUfL6F2DyqZHvBmid3HY729LwfHK7f/cM3z7Lh/u4pQGeLN +l9iicU39P4wJwU6W3fPlG65ePfmvOjNi/xxC97nU7GCbdBujFYDqDHCqmDOgQNTK +LkR6v0PnqZmd5BvAsMGkoQnouqZcscNSxI70+VV2yfMNxv0XEnGtokMWAga2Iy1R +ycHMt6CjMrt3QLXOTeF8QUDfX4ANMRaJWiRKt4SkBLyChwnuzYwv1ozHPIsFoovH +jcxnV5ECgYEA5d2ENKGCASRIED3iEKeUCVcej07hVtob1/zM97bwMgfX2ozFUF1s +KWXeEICzDTSwCKJGY/rrhepzwa4l0I/H8iq+ja66qiBgFgqEupPO1FwitmbQNPdm +T3GEiNgP0en8TByWM2YN7hdqHBG++dzgxwrUP1uzR/YJYCEs43ra8akCgYEA4EuL +RhmVogmrvrAFSZTi472LA3PT+m8EIf0Y+5vsejlL4TX3kNAz/rcwD+IT3GHpZ0c6 +y6AYLccWu//SQCfyowiUJjTlhZX1ViF1kwAVIMtMOmVKd03cxVPkXdaeX1ZTZo+I +zC7URQQvrA4v15D3E+tfYAysLobGtfT5azEHNbMCgYB+4XJLVuca2DsK9A2n4che +C3+r3P+XYFdENp+xEIKvKxMj6NY1UQwWIcuc0l2DKZUNfv1ZzLRavBpfS1BGOkdE +zgFW1Z7lr286W13+Wv4szBrBEilVQ0ZvDZr5qkG5Pe3s2U6zWl7QEI6apdUDuL8+ +PyT/QA/K5e0w6b8MZdEa2QKBgGzpYdyNKdJ/3ax/bmHXcQDjFfB8Ou99HsWeT5ZF +lWOYFNdrzCW+Y7EIpMbhoYE+7gYfWVZtC7CW3tw1EZPjkfdGgCG5R8ZBFdtX7e6O +eLPu47nrW2hpH42V+ery/v4OPfssMWHsT1if+sISNXr4QnGitjI+lREWksDiDobV +TMnvAoGAOX6A/z0kAtXRhqxQzd+UMa4cLobSKITl5yM3vFE3BMxuGfU2ZbK5wyV7 +7vbgPuooZPyKD96t8vPOsgF6Mp5xL28DfPmuFmTN7/IdwaGjMOy0ZGCMgCWFB8i4 +RZuB6Wsydah0yMs/l96HLqL4TVjr0JNewdgzhH+AHFvma7TkmbY= +-----END RSA PRIVATE KEY----- diff --git a/collectors/metrics/testdata/token b/collectors/metrics/testdata/token new file mode 100644 index 000000000..7c4e8b386 --- /dev/null +++ b/collectors/metrics/testdata/token @@ -0,0 +1 @@ 
+eyJhbGciOiJSUzI1NiIsImtpZCI6InpYenBnenB1TG5UYTc5SDRKUTBPXzBaV1AxSERiYy1oTHVlLVhUWFU4TUkifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJzaW11bGF0ZS1tYW5hZ2VkLWNsdXN0ZXIxIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImVuZHBvaW50LW9ic2VydmFiaWxpdHktb3BlcmF0b3Itc2EtdG9rZW4tbDVjODkiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZW5kcG9pbnQtb2JzZXJ2YWJpbGl0eS1vcGVyYXRvci1zYSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImIxYjg1ZTE5LWQyMjAtNDZhMy05MTFiLTc2NjhiNjE5Yjc3YSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpzaW11bGF0ZS1tYW5hZ2VkLWNsdXN0ZXIxOmVuZHBvaW50LW9ic2VydmFiaWxpdHktb3BlcmF0b3Itc2EifQ.NHfbHU8q6Zdbl69eLnBHLiGvGxbPBbv04hFdIgyfQvwX1BmLa996ZXi9O39QP_fDx0_J6Iq9HBzB7bpza1pznG3pikfzd2QIQt_SFww3_ch5AzKbSMSPC9ZvULNq3INeJrD6u44sqBZ4YfI0YeQX0ggkTBTIuEj-dhGDU0zmWIhMpF8sGu0STxjrxkZjilfOt-Ct6SVSzKULxXTyyN-KEsFmiGV_ZsHelgcg_LtYfVVgci5QGVe8IUUnMwYb1eaFdxX9wiHBA3pxgSROKQoEAH-hquQWAAWQkQkDcNQibifFTEOHXvT6YJ47lxWvEBMPFQ1xvTJ8vwUb4Vm_uionHlv8KPCNILOIp6DmHjHYpM9r21s9LJWfh2cD2BXy_NZTLK0V9XlqsVwgcdjcAap_BcRKyiFk1gd5UtBH9sdIqgfcF3ZthnasNVei45UsIqVTvFzx3cLRMIKHD7-A5fFYq443CGa1TtQoHsIQUtgcCtxhksIOEGkU6jyTefg9fgU5euyw0dpl_YiN02VJ0DeYWnlEcMAo3xUzzXZCdKFp_tUyfG_VOuv1Co-j9xxR_AMdzNnwFUytaVy5oeO5zsNs3VBPozSHyyYnw6DNRSg38gtq-Lhk7bO9LMwQrH4ckIWRN_NwKKfHcDoR7oMclo7SHctZFX8POnF7FLOwSjDNMo8 \ No newline at end of file diff --git a/docs/MoreAboutPersistentStorage.md b/docs/MoreAboutPersistentStorage.md new file mode 100644 index 000000000..09d44ffb1 --- /dev/null +++ b/docs/MoreAboutPersistentStorage.md @@ -0,0 +1,43 @@ +# Persistent Stores used in Open Cluster Management Observability + +Open Cluster Management Observability is a stateful application. It creates the following persistent volumes (the number of copies depend on replication factor set). + +## List of Persistent Volumes + +| Name | Purpose | +| ----------- | ----------- | +| alertmanager | Alertmanager stores the `nflog` data and silenced alerts in its storage. `nflog` is an append-only log of active and resolved notifications along with the notified receiver, and a hash digest of contents that the notificationn identified.| +| thanos-compact | The compactor needs local disk space to store intermediate data for its processing, as well as bucket state cache. The required space depends on the size of the underlying blocks. The compactor must have enough space to download all of the source blocks, then build the compacted blocks on the disk. On-disk data is safe to delete between restarts and should be the first attempt to get crash-looping compactors unstuck. However, it is recommended to give the compactor persistent disks in order to effectively use bucket state cache in between restarts. | +| thanos-rule |The thanos ruler evaluates Prometheus recording and alerting rules against a chosen query API by issuing queries at a fixed interval. Rule results are written back to the disk in the Prometheus 2.0 storage format. Rule results are written back to disk in the Prometheus 2.0 storage format. The amount of hours or days of data retained in this stateful set was fixed in the API version `observability.open-cluster-management.io/v1beta1`. It has been exposed as an API parameter in `observability.open-cluster-management.io/v1beta2`: `_RetentionInLocal_` | +| thanos-receive-default | Thanos receiver accepts incoming data (Prometheus remote-write requests) and writes these into a local instance of the Prometheus TSDB. 
Periodically (every 2 hours), TSDB blocks are uploaded to the object storage for long term storage and compaction. The amount of hours or days of data retained in this stateful set, which acts a local cache was fixed in API Version `observability.open-cluster-management.io/v1beta`. It has been exposed as an API parameter in `observability.open-cluster-management.io/v1beta2`: `_RetentionInLocal_` | +| thanos-store-shard| It acts primarily as an API gateway and therefore does not need significant amounts of local disk space. It joins a Thanos cluster on startup and advertises the data it can access. It keeps a small amount of information about all remote blocks on local disk and keeps it in sync with the bucket. This data is generally safe to delete across restarts at the cost of increased startup times. | + + +## Configuring the stateful sets + +In the `observability.open-cluster-management.io/v1beta1` API, one fixed size is used for all of the stateful sets, which can result in wasted space. View the following example: + +``` + //defaults shown below + statefulSetSize: 10Gi + statefulSetStorageClass: gp2 +``` + +You can now update the stateful sets individually in the `observability.open-cluster-management.io/v1beta2` API, as shown in the following example: + +``` + //defaults shown below + StorageClass: gp2 + AlertmanagerStorageSize: 1Gi + RuleStorageSize: 1Gi + CompactStorageSize: 100 Gi + ReceiveStorageSize: 100 Gi + StoreStorageSize: 10 Gi + +``` + +**Note**: The default storage class, as configured in the system, is used for configuring the persistent volumes automatically unless a different storage class is specified in the custom resource specification. If no storage class exists, for example in default OpenShift bare metal installations, a storage class must be created and specified or the installation of observability fails. + +## Object Store + +In addition to the persistent volumes previously mentioned, the time series historical data is stored in object stores. Thanos uses object storage as the primary storage for metrics and metadata related to them. Details about the object storage and downsampling are provided in another document. diff --git a/docs/MultiClusterObservability-CRD.md b/docs/MultiClusterObservability-CRD.md new file mode 100644 index 000000000..d47e26332 --- /dev/null +++ b/docs/MultiClusterObservability-CRD.md @@ -0,0 +1,680 @@ +# MultiClusterObservability CRD + +## Description + +MultiClusterObservability API is the interface to manage the MultiClusterObservability Operator which deploys and manages the Observability components on the RHACM Hub Cluster. MultiClusterObservability is a cluster scoped CRD. The short name is MCO. + +## API Version + +observability.open-cluster-management.io/v1beta2 + + +## Specification + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
enableDownsampling + bool + Enable or disable the downsampling. +

+The default is true. +

+Note: Disabling downsampling is not recommended, because querying long time ranges without downsampled data is not efficient. +

N +
imagePullPolicy + corev1.PullPolicy + Pull policy of the MultiClusterObservability images. The default is Always. + N +
imagePullSecret + string + Pull secret of the MultiClusterObservability images. The default is multiclusterhub-operator-pull-secret. + N +
nodeSelector + map[string]string + Spec of NodeSelector + N +
observabilityAddonSpec + ObservabilityAddonSpec + The observabilityAddonSpec defines the global settings for all managed clusters which have the observability add-on installed. + Y +
storageConfig + StorageConfig + Specifies the storage configuration to be used by Observability + Y +
tolerations + []corev1.Toleration + Tolerations cause all components to tolerate any taints. + N +
advanced + AdvancedConfig + Advanced configurations for observability + N +
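The top-level fields above make up the `spec` of a MultiClusterObservability resource. The following is a minimal sketch of creating one with `kubectl`, assuming an object-storage secret named `thanos-object-storage` with a `thanos.yaml` key already exists (the secret name, key, and the nodeSelector value are illustrative, not defaults):

```
# Sketch: create a MultiClusterObservability CR exercising the top-level
# spec fields from the table above. The secret name/key and the
# nodeSelector value are illustrative assumptions.
cat <<'EOF' | kubectl apply -f -
apiVersion: observability.open-cluster-management.io/v1beta2
kind: MultiClusterObservability
metadata:
  name: observability
spec:
  enableDownsampling: true
  imagePullPolicy: Always
  imagePullSecret: multiclusterhub-operator-pull-secret
  nodeSelector:
    node-role.kubernetes.io/worker: ""
  observabilityAddonSpec: {}
  storageConfig:
    metricObjectStorage:
      name: thanos-object-storage
      key: thanos.yaml
EOF
```

Only `observabilityAddonSpec` and `storageConfig` are required; the remaining fields fall back to the defaults listed above.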
+ +### StorageConfig + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
alertmanagerStorageSize + String + The amount of storage applied to alertmanager stateful sets. +

+The default is 1Gi +

N +
compactStorageSize + String + The amount of storage applied to thanos compact stateful sets. +

+The default is 100Gi +

N +
metricObjectStorage + PreConfiguredStorage + Reference to Preconfigured Storage to be used by Observability. + Y +
receiveStorageSize + String + The amount of storage applied to thanos receive stateful sets. +

+The default is 100Gi +

N +
ruleStorageSize + String + The amount of storage applied to thanos rule stateful sets. +

+The default is 1Gi +

N +
storageClass + String + Specify the storageClass for the Stateful Sets. This storage class is also used for Object Storage if MetricObjectStorage is configured for the system to create the storage. +

+The default is gp2. +

N +
storeStorageSize + String + The amount of storage applied to thanos store stateful sets. +

+The default is 10Gi +

N +
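The same fields are spelled out under `spec.storageConfig`; a sketch that sets them explicitly when the CR is created, using the documented defaults as values (the object-storage secret reference is the same illustrative assumption as above):

```
# Sketch: storageConfig with every size field spelled out. The values shown
# are the documented defaults, so this is equivalent to omitting them.
cat <<'EOF' | kubectl apply -f -
apiVersion: observability.open-cluster-management.io/v1beta2
kind: MultiClusterObservability
metadata:
  name: observability
spec:
  observabilityAddonSpec: {}
  storageConfig:
    storageClass: gp2
    alertmanagerStorageSize: 1Gi
    ruleStorageSize: 1Gi
    compactStorageSize: 100Gi
    receiveStorageSize: 100Gi
    storeStorageSize: 10Gi
    metricObjectStorage:
      name: thanos-object-storage
      key: thanos.yaml
EOF
```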
+ +### PreConfiguredStorage + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
key + string + The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/storage.md/#configuration for the valid content of the key. + Y +
name + string + Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Y +
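In practice, `metricObjectStorage` points at a secret whose key holds a Thanos object-store configuration as described at the link above. A hedged sketch for an S3-compatible bucket follows; every value is a placeholder, and the namespace is the one used by the other manifests in this repository:

```
# Hypothetical object-storage secret; the key must match
# spec.storageConfig.metricObjectStorage.key in the CR.
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: thanos-object-storage
  namespace: open-cluster-management-observability
type: Opaque
stringData:
  thanos.yaml: |
    type: s3
    config:
      bucket: YOUR_BUCKET
      endpoint: YOUR_S3_ENDPOINT
      insecure: false
      access_key: YOUR_ACCESS_KEY
      secret_key: YOUR_SECRET_KEY
EOF
```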
+ +### ObservabilityAddonSpec + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
enableMetrics + bool + Indicates whether to push metrics from the managed clusters to the hub. + N +
interval + int32 + Interval at which the metrics collector pushes metrics to the hub server. +

+The default is 1m +

N +
resources + corev1.ResourceRequirements + Resource requirements for the metrics collector. +

+The default CPU request is 100m, memory request is 100Mi. The default CPU limit is 100m, memory limit is 600Mi. +

N +
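For example, adjusting the collector on all managed clusters only requires changing these three fields on the hub CR. A sketch, assuming the CR is named `observability` and treating the interval as seconds (the values are arbitrary examples, not recommendations):

```
# Merge-patch the add-on settings on an existing MultiClusterObservability.
kubectl patch multiclusterobservability observability --type merge -p '
spec:
  observabilityAddonSpec:
    enableMetrics: true
    interval: 300            # int32 push interval (assumed to be in seconds)
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
      limits:
        cpu: 200m
        memory: 700Mi
'
```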
+ +### AdvancedConfig + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
retentionConfig + RetentionConfig + Specifies the data retention configurations to be used by Observability + Y +
rbacQueryProxy + CommonSpec + Specifies the replicas, resources for rbac-query-proxy deployment. + N +
grafana + CommonSpec + Specifies the replicas, resources for grafana deployment. + N +
alertmanager + CommonSpec + Specifies the replicas, resources for alertmanager statefulset. + N +
observatoriumAPI + CommonSpec + Specifies the replicas, resources for observatorium-api deployment. + N +
queryFrontend + CommonSpec + Specifies the replicas, resources for query-frontend deployment. + N +
query + CommonSpec + Specifies the replicas, resources for query deployment. + N +
receive + CommonSpec + Specifies the replicas, resources for receive statefulset. + N +
rule + CommonSpec + Specifies the replicas, resources for rule statefulset. + N +
store + CommonSpec + Specifies the replicas, resources for store statefulset. + N +
compact + CompactSpec + Specifies the resources for compact statefulset. + N +
storeMemcached + CacheConfig + Specifies the replicas, resources, etc for store-memcached. + N +
queryFrontendMemcached + CacheConfig + Specifies the replicas, resources, etc for query-frontend-memcached. + N +
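Most of the entries above take a CommonSpec (replicas plus resources, defined in a later section), so tuning one component looks like tuning any other. A sketch that scales out the query path, with arbitrary replica counts; see the CommonSpec and CacheConfig sections below for the field shapes:

```
# Merge-patch advanced per-component settings (CommonSpec fields).
kubectl patch multiclusterobservability observability --type merge -p '
spec:
  advanced:
    query:
      replicas: 3
      resources:
        requests:
          cpu: 500m
          memory: 1Gi
    queryFrontend:
      replicas: 3
    receive:
      replicas: 3
'
```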
+ +### RetentionConfig + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
blockDuration + string + Configure --tsdb.block-duration in rule (block duration for TSDB blocks). +

+Default is 2h +

N +
deleteDelay + string + Configure --delete-delay in compact: the time before a block marked for deletion is deleted from the bucket. +

+Default is 48h +

N +
retentionInLocal + string + How long to retain raw samples on local disk. It applies to both receive and rule: --tsdb.retention in receive and --tsdb.retention in rule. +

+Default is 24h. +

N +
retentionResolutionRaw + string + How long to retain raw samples in a bucket. +

+Default is 30d. +

N +
retentionResolution5m + string + How long to retain samples of resolution 1 (5 minutes) in a bucket. +

+ Default is 180d +

N +
retentionResolution1h + string + How long to retain samples of resolution 2 (1 hour) in a bucket. +

+Default is 0d. +

N +
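Putting the retention knobs together, the following sketch patches `spec.advanced.retentionConfig` with the documented defaults (so it is effectively a no-op; substitute your own retention policy):

```
# Retention settings live under spec.advanced.retentionConfig.
kubectl patch multiclusterobservability observability --type merge -p '
spec:
  advanced:
    retentionConfig:
      blockDuration: 2h
      deleteDelay: 48h
      retentionInLocal: 24h
      retentionResolutionRaw: 30d
      retentionResolution5m: 180d
      retentionResolution1h: 0d
'
```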
+ +### CommonSpec + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
resources + corev1.ResourceRequirements + Compute Resources required by this component. + N +
replicas + int32 + Replicas for this component. + N +
+ +### CompactSpec + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
resources + corev1.ResourceRequirements + Compute Resources required by this component. + N +
+ +### CacheConfig + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Property + Type + Description + Req’d +
resources + corev1.ResourceRequirements + Compute Resources required by this component. + N +
replicas + int32 + Replicas for this component. + N +
memoryLimitMb + int32 + Memory limit of Memcached in megabytes. + N +
maxItemSize + string + Max item size of Memcached (default: 1m, min: 1k, max: 1024m). + N +
connectionLimit + int32 + Max simultaneous connections of Memcached. + N +
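Both memcached caches accept the same CacheConfig block; a sketch with arbitrary sizes, kept within the maxItemSize limits listed above:

```
# Tune the store and query-frontend memcached caches.
kubectl patch multiclusterobservability observability --type merge -p '
spec:
  advanced:
    storeMemcached:
      replicas: 3
      memoryLimitMb: 2048
      maxItemSize: 4m
      connectionLimit: 2048
    queryFrontendMemcached:
      replicas: 2
      memoryLimitMb: 1024
'
```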
+ + +### MultiClusterObservability Status + + + + + + + + + + + + + + + + +
Name + Description + Required + Default + Schema +
Status + Status contains the different condition statuses for this deployment + n/a + [] + metav1.Condition +
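Because the status is a list of standard `metav1.Condition` entries, it can be read with `kubectl`; a quick sketch for checking whether the deployment has settled (the exact condition types are not listed here, so treat the output as informational):

```
# Print type/status/reason for each condition on the cluster-scoped CR.
kubectl get multiclusterobservability observability \
  -o jsonpath='{range .status.conditions[*]}{.type}{"\t"}{.status}{"\t"}{.reason}{"\n"}{end}'
```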
diff --git a/docs/images/multicluster-observability-operator.png b/docs/images/multicluster-observability-operator.png new file mode 100644 index 0000000000000000000000000000000000000000..eadeec84cabd0a88440e5016e5d8af42b5c289b9 GIT binary patch literal 60266 zcmeFZcT`hb*Eee8sDLO+vmlBErT3~JA#@0#C3K_)2qCnD&=sZFKvYBoL_noCks=5P zsHg~t^e!UB&_TMuH@BYiJa4&U-1mJ=Qf-9x6r{cD3+4aF&)x?`bb98of*m6oj<l*N$` zNm*ejaVfJu<825IZhucGB`gXPoYA$hb9VD4dpVu`Gs3~!hVW-L3QAkgk0xp2j`Nd2 z>)DIxIAQhwjG;PsdO4Hb{tOcn7nTzi6Z><=+t=OU&t(FcOmKBTu1cuFj{YVcR&=u= z{XGN=)Cm3kp!45bu$7Z_v=Nt<5R#S@lNPd*cCZtYvX!wHl99D_l#+CClo7KBvXMz; za8z46cQ0u*tf&oEN)tt}CCGYv+Suw!iAvep*qG_r*vsKm(dtHMl&_P4pY17eHAk|x zrVmY1T}{r}R?SDkLElM6T2e<}oak*M?yiE;GxPPfA% z$pP=EjCJwScBkkNb!aF(7^I1n6?0cNG;l_H=@HOwaG+{>MryiZMxt5PL-WXXMJ9`&X zF|;~WPlN2|uoSB2Z993IQT2o!t2d{y)gCQm! z`ntyAV(LUP!9d*^?_@x+G1YXF_I6PxNVym|0mW|4x-ia5%1+it&mAqH>*I|k8;ZHv zN@IKsounijv~doGrbJUiZCSJy)mB?mM#TvQi=!Ods7|0sR8i-LSzLGRaCp#5)g07RcsFR70sh^PpS<{fFC4mOQ9F&a=#SMUJ zcd`T;r|jx#tl_Jn?WF9kj3O(0=!^TfDwCD*1WyTZq9`7xPjHo{8f$858@kFl8Wa5- zb=?e|X$~}sFJ9kNmFVk0@piXIdzd*ZgRY`b(x#H`C^H}UYU+UZz!U5=OtcAFL@{M? zEkiXeW3&&2io+=zlg-Gwc5Y@UJyjy|l!2=^4IzkvGjQo(94{Z%enwhhftGkx6t)V@U zhH+8xrWqRPso`ikQf}`0a{73*tU5)?NZO6&?d^lHbyUTR`Pzs(Ip|{C32xGQ7_6MV zgs}+@1YS*r;3DIQ#_53kqU20$v@~TzO_Y80z4X-7sHS8)3Qkr`8H-n@A{!z&`H8w2 z8|iyXVw|+l4yO9fD5|d-j3>y!OjJ8#bz~O!s;^F=X=s2cL8;3~!KZ_xwmy#Nf- z>Jd<)e#$1Q-c*XOql%cOv5g*49Uf7JVX_301_mc?>_qkP@T1CNs0K!IvUm+w69-dO zLvNgqzMl+6M?*{%hKg#+$Ddt7@G^E9+TtEgHW)EeeIm{jr!Q)tO(gpgZOn{q4cxUb5;TH` zuB4i+y&)FtJj%$!$O&(!sfiJ{MXT7;{Af56QDxXJ${DR;M>E!w&>^}w8*00W%8@ae zHYOx8nv9XfNw}GyvHBXK1Stbwf`hn;2XdzH56)E0LrqjhnWBZ&!x*a)iT2_qI(igw zT|*BCl9RLxMaRa&Ow-F-Syq&&qbiHi_Ey6?Q6=3?)G0m$H!XPF6;Vk^Z7s66Es887 zgBRB%5=qk9s_rOHH7v!{SX2X%XquUks;+~Si3(NO*HG6}L&`vs;DPrvaWr-&N$Bd^ zxJmpW2rVb-=!DiZ({OO~P}S5j@-=bRkkuvX$|$SJO1V&74J9ZP8%J1H%iY$6B117U zq>$~6jeOy08JwM-n75XLix0VrF@YcsYsYgxN4Z#+1NOcB;mIStf{VJLh-`d z8&b)dKH?HKp4z@Twyw6~k|r+FS{mNIG7fesqB6$fWD{RRI!)buoqW;SL=Rm9nw_^c znqseqgI%i0h+>Q#=a8-67xjB+F zU1{nr$}%)bM`;%`XEPI92@MHxZM+fUFEq7C64GX(XcB=UNstt`m2&a(MAXUE4L-0^ z`tV(iAd2{R31EQQKR*4pgNNULqZSEOi@fQaEnALl(L^g7(Gn*5oZXEax)o>EhRjvx z1{wG{Y&BH{T15}!I9*bH&Bc4;nA$$&IMHM2tXQ_=5$}byFXcz@p1JY(WOj-T)72cY z<7@4%yNO23IlL^}zf_Xj%onOQc*IAO6|_ga)5`Ud6g%?hxd-n)Ymp0F3d~IiYINSp z`ojQ^5C8N@$KKxl*ohO{Gcqy+Ic^M>e0=}@_1tiMbWBWWS($vQ`q8-CTel~icUsE_+X9oIyu&hXkMr~Q3E;9XI5<3QPL0wDOO8xuaBy%isS2tX ztbO#>)-%ps)8xb^;hw>__F<2nNq$Jf!MU}?W2 zcXo@mvijx+ta#Rj%innWoGN6Jf1$$e!uQ?Cys*Qtz1|!*9457Nbwe8*RdjW4HJ^w^ z9YJm~Q-GAQFDOe*&1?VtqP~9gf4i92;3(mD(jdYPsJfIK`PTNJB{Ss-=f5YjO+Qh= zMUObQYZNQvGi4F6D`;bdAe)FWGB!?5NkIh$cfWAn$NForhb+|JG1*gcocDn|C3E2D zfJoJ^kYB$7fG4#jF;!WE#iY{7z?{IfMP+n;@MchLa2@tcFbSV}BRTRXEHZa2L8UK2 z&g&R2zvKeFjmg8qgT96wh^wn0997r1K<8w)kN3_7`3HovaEC-(Y^r!795m_o|Asn%`p8{ltKk7l*NW>+x+__*C*hWoh7A z@v51o=I%Rp?zHM3>A=baEW4xC)KKW#_wHR4z~^2ZdUs0$gLzlAu^=LwxKK?R`4ks% z?V7T^{YkZ`qqny2WJ#ZX876;TX){V`&G+v$-qh9E)rReHmmeQcyLscr zm55`~(i}H9gbb7jQ@t-EqN3F6BDtgQ+zGjJXa9o4^&2;$VZGj73!R6OyY=<;gZgqY zX$L423YP*^P@Xyw0aKnlb?Ug1l67EkUm1n#98DA+5~IyrM60SEKYn~`S63HZI=81d zgEZ|euWw})NqFS@S9ElA#P#baEiJ}etBTAiySeWlG_dd#< zep=_jXIt`AXTf&8bF(|pJH*7qmcL%vJF_-jl{Fo8NQx1-_vXt}ZRD}^^fNaP%dmiw z`IIN+n3wRLJ&Wv;OVVi{b*eX`(r2`h`{!g2XS@vg-JMg~a8fxzRSoencCZk#0Rw~m z!1pUW!M|8BX)U9ZWzH?9k@2TaouUnfa*D#C)e^R-aQ^@)`L2BCYT5EKEn*jXN7K1^ z=9H9_W4tcm3pGq4$K>S?@rHoZR{!v}{qjU}A8T(JMZ2%5IaR$cGT>zxO#AFOKR+X| zjY}!$T+s61&L>ZwDx+Ho1CdAM_Vb2ptgj6%bXen+Hd`|cZ=n(&B_+kgJUzO3@2p|# zbUvZ!Xs4?*^C3x%^>acK&ek_>-Fmw`JJeSEzP|oS%MY}h8=TLq_q}(qldHg`6Q@uA 
z#Tzm^RA)zT%^2$}NbRxvTpS;9OiZkqWbQmzP3e&?nyjy{KhBGs#tY|`+YcXRZkU^I zkGp;Q@sy0X_A(nvQ18D=xB0-4MOK$w(`U@9BocTPp`83wbU_eAj z$e2%kc|ZLT?b~ip#LQG*5I?n8VawJ)$$+(m9&%M^#6^(fQvW4kn99Y)#b}Gy9tyVt z)u`uPQV(VLW6VBQc5zD{6%;C@hw2q5usYwQJMr)2H8&8iviu z6%4G^gSFv(sKoI@RX+W`>gS(zcbl=FDLPS8wYj12ZD`1t>{M}zx3h>C%Xp<*|FeS6 zRQei5M+a?h|74YHU7WR82BLpkzKq~4)*5ZaOhF&Ls+B&Dh(HI3Dhx(8+I?xtvZznk zvh4dJ7kWohz?^0(VuJ@N{7agfCE?)AOWa;^U;BdU1IL!>)?$?IwH}zz=wJQf)hmnj z;Es~0L`<5zXIJlHOU=dFRFiJ6SKdDiiq0S2INII({H52ZZBH?2AM2+Cxt2+}z_m&6 zDQC`})vtIO>X-wr;2Nm>x3r`n6gcr>*RGVvhb|?v4&^R+OhOOBQ2&8D{@jcZ{_{+{h>4|F{#CrhkmHCqSq*$xU7(CUc$l~^5u&GrB%l~{8?LPft_(W z(PeECEFy16cD7(iu+NV#rzn!fJ&(W>c$GJRK|9W?p{x5gMfJux#WeM(qol=?3E#fi zdQbQFO)hquzvw2x33ZRB43;UK_?Ig+LRscnRlH*4(GAUPQe684zB_RB=jo`id$F;p zWo2e~pk}_PEMP?(P|1`2LpP$L!dtpzTlXB6X66k6y-m)}{*?2vzJAcVU%_B$vSc80 zEiWs}t9){}J~psFw_9JiqvTftoTBOL1&ze<=_%dxa*v_E&^uJZm_61Gc6V4twf@*m z*%Pb|{tpuKWRVs9BKo5_*yw-<)V zPfiw;mdXaK49C(n;?uBciE*V~qnO@TzAZvDC#InjCr%WQdZZ`%E6ghvVq#-Eoa(Yh zM~Q?`VT;mzF)=ZnFTK+I%o~4rmkE6n5fJcVI*d5lN9X-#K1noPK#RY|3L*K>z6A|iFM)x)r(xz{KQGG=Vd>HEmYH&9!b!`CBL*>+nO3#f3%9Ynxs@_zau z;r&t9uY39*&ahct?x@;$+TX~=>iy%3Dz|Dg%D6Uacm2nY2E;p!U`EDLv$eFeT2Cpy zxL61{poBYbQOw3KD9G42#;?a9>%jvq%ATU05^`3(IXJ@CKeDZ3(-;_X`h(U;6tySR zR#kHPJhfat5kv8=FS;*com-hllIg(utp0S4=q=Y#|BXR6*9JCmd`wR42M=a6wo>HB zqg-6b#>BQ6yPAiq+(jkTK+jiiX{&f?fMDSA(?1kl$u%}N?{U(#x3D+@ZmGAJU}2G9 zK5%aK%>h{o{a)EE>hl}p>xfHEy!oY9{c|8lfLq1qs^H*-UMIfS%g-$jR|=z&x#$`q zVe-6tQO8@0p5`offyVF$aQggAPPX_FCdt@ z)(;TrGQ>pXiQewx?LusvoVAXHQu0hvCX}Ifd%h134+3y0Q#{$X5LCM|KROF8b`XFpo$!<1rrn|HGu>Zt0sw=0T7$K^gI>Gqcl}~G+(NSJfO(N#>Ta=76;MJA8x`}!TbLTB3@(bYzuA* zhsl2dEBk(Xm^_4F|8m8mtpz56|B7V(OB{9Z;K9EmHnURqvBI8ID1yi@5t{+?~4tXIFna2 zvAOyA*?N&GF2xB`K^(0e3E5t~IwxrM;=R!4E>~%bQupdH{onIygqQnFBdUmSjYHTX z0^-$UQc^K*T?*vA$1X10U5rcR5Hz`@dV|9TD1P7ANM-U44GnddPgILSF?s-Ck#`PO zE@v_0$a=b;wU%dleJ@z5aIgb!$5=H^zXqO!CIxRCOv5CK2fQ!*WXMd(ZU@Or;~LYD z0j`M1LClHAr;zqtmCyava+Oz zh;0#YAtbaV46Z^Hd&cB{r;G2Iu3fvfkJZrF);6U%{#HZ?2u_3sU*S=T#Q4x@-szbb z4Eo8HxGT|dJPNEo>hSWc`b?jWUjMYQd`*L=^2;>{%EK?Pu~cnmExMWK&70zQRhx@3 zcHm0HxgmkXi1giqY*%Z3_#96enT3g3FO-Hpc3%sONOul+dVuxiuDUI3kyQs+rQX53 z&B0fR%ey-UZbbAVW3SGzC=UKAXKg+YL!*a7{l|7M4MauIk$d+YLtop44n#))JKxu2 z@26(Z`R_O7w*G;(*TgZ(<5n%NWh2x)Kav%CBx}7Aje83Vm|kb!9?2d|;1&1=qzTq$ z4!+B*l8J6Zws`;I@I|}ffyDP(%UXOX`Gf_m-NHc9d;Z_Q-d(6AU9e{dI`lhjA7s=` zUa;ST{4lY-{W82KwK7JtTh^Yh&DP?X*4OIwBdb6;X(qv9XUm_YGwfhVa-Mb|4Mz^16h}EZ>~L13RWCxN{aFs2*f8|gj~VVtLo!m zZhrHh*H8;!487^FVt}l?&(zJ$rOgfN)M?!$MF^NS*9SI9i<2FOX&6ZSI5nqSJZ}$L z`uBs3RIF)<8#j8#PL?C?YpaP0KKt0&pSJh-%Iot&w0f6MkLBYKBRlZhed+wT|g0p6>$XdVlXGGrtVEA8l$H zSGLO|9TH^E|N4EM%joA&G4KER@&Aek`wtvA&MU|r^WcHg&lXj*GT47K+l6ylYUt`3 zXBGGg4uefFK^l$T`UY@tsu z$PV?2di6H)xZzj#6TDf2y(h>8cJJ9_dn4Yv$S%D%pWqT_V3FZ{eyH*M9EU=f&gI%$ z5!qq?=aK)zvH!mcj6Dx@XS_X2$-Mrluy1o{@Glh|6BQ*YM6+sJ}$V zh_*(Hl%}0DgnA9{?>XG{G@l`63O5Jv`o!_$)nnnxr)$rjirc2b&1wq8nUxTKEpY~L z?azM`^RMZdDc_l<&VFD+5R(=c{XOTu)<6={dWtJgn)hDO3HyKi@OZ~w!2i;#Hn%_g zi#CWYD!)FuqjMRcWcF`1{0|6RUt3a#49ov?|Nm(Xd%z%LU4?1?qbUv{`2Zhs5Cfwi)linXGR#1UrR z9MD@HuI$8*WVZkOdos4E_PjM_9+AymoJ^R*m4-GeI8IF0tj9Zk*X469*~b`OSI*$m?KVS$LTxu0 zK87{5r(RMws42#9%SP%RmRV^%oULb7y57{^Mde}MOC5-_ga$$yURM~ zE~rs1+rM?GEs+w)FY^50X=G$X>*Im68anFe`A)oG-YWl@o+VIzgFSqbP2<+*lo=h3D`rKsG8wV zU2PhGvnz0I-d|h7Cm>to-oFhJtF*Qgl`vyWQD#OZqf`12s$nL(9ViSHNse6SeB^-M zTrr3I|Dt0MX}Q>*A+2)Vg;UY2h!KjPpZos9qb9!}&21NiM@x`L``ITp=1V{vu)f*v zXC~h>VvLUn)%13G>04N2m#aes;cC_9`t*dreS9GMZG<)LXRJB5tkuM0>oyA27z#Ik z36bbVkJ9au%S_x4O=BM2gvb`D-Qc63RM3fU&wRGZe+=PFj(#OVu?#FrxW(wWxzh9e z=hHB<63(5tk>H!(<%%Q|bC1rRLfC`fv-WZN_fy7|2(kUhayW)8SWZf+ZH83G==Sv4 
zGbAQ|@xthzrLw{ThnIfS$ZyTDWer7!BR9cvk^oN4Hkmf%WTu+W3haWp*G%UYdLQMYF^ zQ6(ASMG!gj!hhhJ>Dg9xZrRqnOU z$HrS4+@e}qnpk2OyRdk?FN1Ga-R-+^x0o6PsjqNByOr)+N(P*Geh$ughkR?n<|lIy z2#I?!sJ{DchmY~Te^6o4F_{-*Z7@EG-mx^$v2`HO3msY;Ad9ge zmZR4(d(Sy7tNknzW>2V2&K^;@BE!BpX4kO6Ztrc#fO-rLA!?m)72CD)995!s_wHS& zQ^7DCzP`9Lqa;;BwA2734!)iv>S+WM5V^IXw>bL_>`CAtzx@#wc zf&|MW>O%=0MPuh&{aFwZ80#w9iAGnP?-6ZnGq$Q8t|?3x%22MZJkVLBc)-s3=pNSU znjc=jsUCjGKqqkb#DF$D@4Df=Ie|^T%}B{hy9p)O-uGgqxS$V{{(MW{Gi#GW*GTT2 zFAev!i4zeQs1BCl6BfrvA#&tFdk1M%-Oakn%p!;W^gHL_Q2CA%1d!3ju8(5g7d=z8 zQ(myqpG5|zeF$z5ZM782#VHAhtU)yr1pD)Pp-M99bgS{GaYoC-JdQ;2ob(T#fL*NB zZ}9z>YJ+gLoKXjPh`pNeQt zxPofxyo#6xw-ig8uQ7e?K;0ADFIfj>hU*Tqf_QLA*s|^S`|0*V;ukPpW32hGf5}8x z?Ub+n@FK0~aG3eS)IS=5>C_0vDcaySwy*XeIAu?Sc%D}ZV0 z3|5=#Xk1FWUN2j;B<53zm#ez$w_f>^_zB@;=IPt#}gIX0Ku>D!#lSRGx$>Uz{j= z%Hz*X|M?5ii`|qgPQh@EdO{soIV{=NS+D*2OYPRBB5o~ux(95dStwV4w;P>a(gKsC zz+M{_0u#@n>6~^hO9dXP>yfhSS5{T;b+3&W_S4L{-~Iq=j@}W=)Kp_rRZ<&#c12Zo zVwJ;KI$&*ltnrH5(qMi^O7Ro5NfVJr@;TH@FuK)~5x4Uc+FtCw3-^MvuP^@gMc^Tn z?paMI?92wuRX}64Tz%#RTdd9O`6T8H5e@QxnB!m zHOBj@E))?dU{_1DaZY)CVI?hY@pCoxZt8Udx{0l0)b8lzVXrUibDcSC^dpO_CziX5 z6XxFabUydnUYy$^Z3w{?6Jkz%BDy9TedU6U=F zKL&0zeWM|3lg7X~H(7>s){uTmyJhAvGf@OagfB;wRhN^ONe`Y2qu!;k%dt-Tq-0l`PnPnu2dPglkK1uY&V|27Pz`;AYZ+t4_s z=p>(BpBmHI+#9O?KGQ!I%ETo8*kX;Gu9XcV57YA{ec#G#yCzgk76!#j7L*he=k@Z( zf4;urG*HY9Tdv8OzLh?1jBiuNoCkAtu#nTPcCLu#``(@Xs|DwXI`~EqGSMGxyE6^c zZyEBbQ-aQ@afdc%=v2c6$Gz(WCUp=|+9^UugP3GvZ44_i^z2!i36C8k_{?W$ER7u372+ z8P&@3E=kbrT8VXH=b6^TZvP{^OTzcFM+t{30^{idwWz+_g}8mQ0jfVTQ{V2(sA|7i zYgKiCR%+;-+5>yR1y;;yu!7m75>rN3_n-!Y4%7MbBv)eNaov$G2^~jW)N|WH0~FBc z#MzF`jI?GTHKu(*Fw?cvP!|j_@3ysq{Uu37)DJEZyDERtQoA#xE%e&HV2X;LJo}KC zNqql#W4}km0Jbk<^=}1tX=ubV{RB7PJyv$DyJyo(q^Q`!zj5qKyVj`%jJNsMsr(Mx zI)eFegDOGPni*n=Qgab;7E=AJh|^-C@Kq{)r}lUidlm^kF5=@d6>F87Sg?8ZP2+fF z*9#RP!`WhRm*wj{)7nI=_tz&q=BazK;3g&@P@_UBxPs2lxYYB?>e<)&CKks~o%9wA znYvEQuyO7vivt9~vQ|+wVYzAhWq%MVs{hthaKkraTJf}mn+SDsQWE+>ZfPl8|J;gc zv&l17&?OP3%NEs}Qa*+$7*E~~t|j(65vGoO{D@O%3nL4?cwyMKL3b)Oq^#G!(NQoC z4*v}S5V3P1iZ^wAjQ_Qv{Xv9xiX8RLp!%Tf!~%i$^rFt|**-Z$T8e0WEyhbt!T5~G z>qX=q7O=Maw|h5DU%-Xk=lzBNGpE;i7L^{lcK$%V&5GHpr4csDKx%keDZ^@( z>Zlmu<4y!yB47~?xaLkA)4AMRYimP&$9QF5@|Ap;jsn*TFf#oB=^+yZcCYQ`q%Il% z=d0xl=`-dKj&3tQegKSwzru%=7DuU7E2~8@WPQ+5_t{qGlAdy5gI1P7Ou+D-Uq&7W zqr9)Z{aNy?tS^?@x!~7kEX4&7-*_PJoWWJ_;aS8xQYF9_gH~#5ZmsGc-jUL>a?ZLe z(}-X}sF?~%d7A=l3qL(3QP&5b8!r59^uCWbF8%U}*_RRQZ#`Y*JKb=WKjGT0cTqt( zZ?mR4zW20sr@{Po6ZNTxXUJ|^X5(EL|I!~sNilQ#48jYR)%t7qg2rrVw4G5?)oc^K z&F;%oxw!@YRlK|=_nwxGOD>sU=N|*gC<~q*LG>Y~N=TVxnV1IpI$$n6SnoALOkz1G zz9Z`Vq6v2OxwjU!vZoe-*EGGX$_u9T(t$0>vT2Y^!_=%SPp%6@*C~o;Ng7 zr^ii=GAw_r0{*h;@!~?1|0@D$WoH+T>%=b$qo;&kbrc$&_fOH;Gx>9`;u`>btwYSQ zi7vxwdZWv$ib;&^AQde&yU zB45iwgxU1edE>9mC`-`5d)$f);T0QBi|b1icP0}M*m0Llj5)qLAAn$AP3ogB?X4BS z!x-uZ9@ZsFicn}-rQV_k)8$Gw6>foj>;}n=HhFJ<3_YbY44Liqx&E&D6S>bVE63S| z=HJ1M0dZQuCyfU}XqDkMKlq}{Z6M0&z-2v-9HW8hzKXP-Zsv|J!Y1SL%SWhzH~5iQ zCDs#(Kv?|OZ?}it#6cL5QNsU1dE7aT4%TDj`w#j+2?%J*>B<`r$Ix9^c47(D_vltn+vI`;Wx$4f zg#Lo5?$KuM{+B&VHj{VSGJ`=A23>#7u$9y;xegF|BfTxp%7-K7#^cF80G&7jdGAdSY58c4qSoBwpc8BE6mM(;XQ3meC>$xbeHaql}q%&qEhyeFs3xMn}$iuf>5& z9{m2u!k*j}>cuAzu+arVazbRy|(o5 zEfzmC7gyz%TQ;39!$MK_Ich|RlK$m`NQMmMmx5cAo`RRf+VsM~q)a7Qi|JHowao7j z5p$Y~;tsD+oM}urXPGliq)&Fh{%781HnsBbbg!D6Cs#?ADC&{l*46nl$LY2OuXhCd zQr4E2mmgOyn!LlmhXjMKz!4O8{VRv3*fdN2XAs#p9F_7T_AZ}R>0^k+rj2Hw=Z#K<@ljX-r0s`x7ESLM4lJNc+#C6xnmflhRh4(84xU+X zokG3umY18{%b~}ob#hVX()$Wxyf46(vaSxpcVPiZo*Sk}e82VI@qPP4#kT&Cr1hzr zY6C5brcZz_gobkebb%(JOA%{tGlMfrvup0v=8ClDBvi5SrYN?(v8nQrE`v_6Kt^RS z0EfBPsq2pxrSy_9=3bdO3a-Mf#z)Q?aemlvY>B)*-GY4>Oo_qYjtyi6JmpqVH%EqG 
zYg@n3W$0=Vf4=!EM=H=P5&yJ5_Xr3#F*f@h61iL{Nz1sq-Heg)`;f}R%aZtohalj1 z+_HV|w|HHJd}83~zkdEm+vxKdIMU2cXIxCYq_M8K7ZKN%j}(5cck7e-6xOb1TIH1% zkhd@q_u48gXq}Z?!qt4bW)ju@2$fi@`ywhI8b!wj&uYYr9yF}0y3$;0*jnsLXI{Zh zv|t|ugGAY(=knFadwl%9R;zK!%ar{N&jr20F=6t!*Z=TBb7>E^xKFdu!Qt6wM`?2q(&tGm{JO`w`FFxPrtK|S8qQopbP9_r?ioIxIiK0ODp~QA`;^Cd=d^Dc$vwNd8`@%0 zypW^;7}mZTmou^Kz(h$Tw7!LTApepN&ut^zOP2Vx;@)|XuG4HJI28X;V?t@%_{>6* z)r8@=acOuN{S`lDzg}(At9JhND|^SIK5dMy#sGw`fm1*b-`JWx50ZBo;9EO3-IMCJ zIR(K3l0id~GzemCytlU(k{Gh5+3#8kU}U=)BU8>}j+f-^||9BgsMwQr+E zfNk|2dr+W@y|pvnfc+<{iCJp*sI>wZ|3vD&PvEW~JJuUH;!NNvudLXlD_LERNKbVG znlD9M{CJq0m3pVh~x-h`qFP9pu|(1JNq#rGXl+y7=!=Gu4)OK!VFRe~OpsnGSZPsV%la z=z5ITq>vU<*YomdAH0K9%T$R(DB#Z9r1l)}KmB<5t54>O`spXZ*iGpy4!SVsBjcr6 zyDn7=;d3_tG9n-YQhV4E;?#V85%cF6-ih3#ft=8?Fuv`fT9AHTPkV90P zJE<-B;>+>2Pw@(cnYnY<<)6fZbz||L@3n>L9V^LPkftbW9y)mb z90Tt{(t1{C88$ACnbL5N`t;{re@{Tvpgl;U%tf2t#5nI=769pqeXJA9t^!A>O?UyN zVu78!A^m;+hbs5{YBN&%hO!Z6lips~sEKo0`Ypg0x)&@n!=yw=9OxbMU+8&jY1|pa zQnz)0aT@f*hy&ICVYU&J zjfCh>f_-REQW(lQQPC=s#i+EsHZjFRbl$wA$7Q~0VKMP2Z6CN@T**rV@5V`P=E+Kk z$qh>&os-t{&d40K%bQnht@nz7_InZYN zbO{xR*>d#_OWOfpK9KX32dF%lFSEWl4mf7nwvFIo8ADaedQ@T^Z=OU5V|<#(&&fDY zzBlXN;^=&4;kTRl@aY}k(ZlP_nQb?&gl9^o9L$+vXiV5IHZGS*Iq_or9wF3*C!~dQtbuEf1G^a1v?_ghA z_RIv##IBOKxpqCd@#zbRt>teiZy`8ELf9u@{`!mgn}o0>3v6@eEXX0-Wk97AU}Eg-PsQ*F=3ni8*cn3LQ#lw zqpX2*8w2OIaCYq@6yVZQbJ-GApsXW%FD9q|wA~LfjQlTd@XyFPMw;Ql53cKVQl|#n zg8kxh^h`&_3>RY-kSgEIGx0lJt5M0quU7R$3K!1S9|`f#&Z#D|8V7(u2|8og1Cb~1YJqt@$s_0B>hw2?p6x>bZ@X|B! z{cBBr>{{z-od2nQ2#eTmn&zQ0Hhl;P?~&hwvmVkBLb3$FP5lFW>hBAxwiVdc zmT(Km8hgCh=S67RLrZ#|mu;*I!th^ahWBvn2}r%#4(in_E}Xa=i{0@JVJRuRi|hgy zk!qt01r9z2DUd$hXvuo^>@RIjkM+C8E`zTa;8hDgSLwshSlxxszO)cCfRxhB@ zJXo(2SK0Kno2^kTb^qwLylBTzoH?H*N@#a&ONBPn2?5;e)qiTs)n;2hZP>*nZ@xm_ z%QV);b5-s!y`01T<6vhQC5zt7#3`JgXR>wtpXx6IajWQBltb9Yd^>^IG1$6sAdIYz zyU0%-5VIGwe6}hfBC;3y8*Qx%wzcDf_EId;opTu5w_RN1sb^jvy|MJHBK(_*Zx~to zesI>zYfYVCa#{Cmx2rU~4NwT{O#b(E-Xbq8I~%rO-M?NP?72If^sy)S$h1u7>hT{c&~{ru5lQYpAyAXZRaSJA&C+c*&)I(#Gc zxSexYPb5^V2ccOP;l|oO_4#V6E!muFO^4Vs*GIELLbkc~m9cv{HdKeak*K}^$( z3mfxo&LuzsXCENzqiA4gXnNdee)LoA_YZf)d>SpXEecRV z+ud`ZGxgA+G3Y4CYF{*djpBYC`f$)pO=L^^4W#YiK1>R2$I-~EIJXNw8Ttc+rHrbj%vUMx;jtH~6FkM=(g{oOx(WcoryqaE0o8{Hg)<#~U07FJ%j-%fo_C`CCjxpc{lgVKoX`<3sk- zw+OdN8pb(q&9|CRnm3YN10zJ}dYO-3l#Jyu2x*NOKh$&Gr}c zNx@$Y^zOo4eXfh?sh4QaJMJQ#@-=sNtC6L7pYLnboP)Ol<`h`Peif<&o@K2(5&<3m zd}sG9Pu#YSqhLKuSoQPWF$Rxkx{gNY-wIWNY!;Q@+Nh#0zmwCv z{EiTACUoR4_HcGqhW(=_8MY^JyTrI3G&+Jqvzyza^YJ0So3`9sl!tFyUk>{O+sq{XJgE9djodo);ce zMCMB`lI8p06hKW^#c?6)wH|`?7~#G;?)UoN1sXLO+2Ot^?htbSI}@w&;z**282KzU zpqL$skwJ0lP-TAus5YovsZo>owNOR(x!L0(poM%5X-^nktl|xc4QEy-Y2IooOa}xG zao1MX>gL3qdy;M?+#RVVgdZxf{quy=)~UDEDjtnE7C;=ajExr|iU+g|WxJ+AB&oQM zHQ~Iz#!jV#PBF}YwCCDEkm`3oS7*9p{ z5*jIRcwZE!lHVIZb`~Z<^5Rg~>J7Hu>3w7TdY+7%;lSop{0VT&3`l@F8>JYgnTK(j z^YFqvOz8D(jWPHhw<{D(7$?p+aLK?h@c@U%eqovKH&|VLX-Gh!jUv7$FpC6GcWv3u4FXq$j=u{$I?ihX zJd;wQ<~02VMEnZ(@=$$%xKbdq1gt>Hjz|uk;sbLYhx)W5<`EnbE>w1MvEEUn#(Ze1 zA~7C72YLt8g;P$tV8w zUR)een3g&S33{`DLg$@Oj^A1Ph&u)Iu@mwDj1-<%=O?YkdZ51{CB%Ya#r{QUc^@>r zC_{N;A8S~d=vzYFH!kF8-WNU_u}14Jf<^B}#3Es2|n z$^LPPj>&a?}QS+tOPe2pQ(BeAN3Ke)Vgad5-6{;X$gcuJtG<@LPxi>W8v3pg+ z>Y_} zPXz#dq;qu-#26Y4&!H0VMl1u7sg&(}`giVz{|;0x&W)3kbLX^DPf(!Zp$hDjB?zbk zG^9Pz&h9ZUdxYwXbs3bk_g`R+p+uAs65Vqu@VlG4^egZFDw+P_HfExC2&2rH!+|4NOj zE*=#{?|}5EswT!N6^nCq{+$;F*{924Rl0hgO%bMy&a@)HYuicW;m>$@CvgRIvr zCMS&{e+tc95xbzuSPelIXwl;h5)xjmk>UTF(2J@3n!vm_fGT+UGjR->fS^a}=j<1? 
zf%x?)=9@?4YlK>zJ1%16Sd#mFVNI;_h~2<)(nBvp6^mK~R_H*a#`fYRKBH zPJcY1+HmyzTEPS_vLLh#eT4$3n7PLj>{1h=Xd9oYokS04tbugIa00UgtbCG-PZioj za2;m1-~=EQnpuTD2rrLy(&o5EpTxn*MqJ#J32Lt4J2=ciP75sqGcpAI2gm`)>sA4` zi!MC?(-;zZInwLP1n~TV=uTs(--9LdyiwAE>JvDjcAQjW|3vq}P3ZY(-OT}E(lEHF z$2O0L5}0=eP-`rvA60~`h6F=B4r!1OnxW*#)J29zd|mtU&~YHBq5nBF9W4!Lfvrz@ zylXK*(#r!ACatJoBitDrC>-~43vUPni(`um+6)BfDN!iuZ)~}?A1Y@vejiO(1b7)4 zQzO63`aSwiMi$EtZ-cX-AJV!(2#A0v9Vvt&y%hUczq<`|*rp>zX()W$L^f3eHH>jz zgJUyYyS>zyv%Vw*K%+=h&lWn2dEftByYHQ0`Z#1%kXrhXXQmS3S+(*%X3nfCs$9Es*^%QxHFUFmLpO+(F@guLFsPzXu$_ z{T?uS^6wdBvyoLL8a4l3!o%eE5(#$`?hb|k4MX|AHzUg@$5+dclqgzfgCLJb9vDwd z7!$I!+Uv;Hh7!Ora3Z@8N05l`JGp-dU=ctz`o9qYyY)_AK%2<(4}RE@nk39CzU4PB z{ug;~{!eAQzK=hZ5S5gvkRn55ie!qCsWP;RkTGLr$V?H1%CIO&%2-R8lCcO;hDbu9 zOo@agLqz83bKE_9@AuyO`~Lm`pV#O4;d#BVy4QW**L9u4aUSP!f+c@ABxfyp5)t#1 zw(YhS!fo?5$_gG4_hi^gJR|XvK87WzTqj-#@j$;VQ*S-Q+asR-|HgkJGMI3})HK|& zv)~MR-XW56SY4g=?}Py@zl3NG8uD_`m*3LTLX*j}xG%HK;Guc>hzP|Q4EjrJpPocy zek(CC0!j$H=#GcDL`9?W%6f=WF_h1PTI4b}VS^TG%QvB{n!36g7cUldFHoLWhF-SG zVmM{ad~FMwkwt_O@?5^FXF^$G-&-b5}!q*ODADm(( z$)XdYboJNWW3N_}bdr=DXBz_hlkz%8?;Y?d=$@}^J@clFA$!8>pI3g6FtNNamYkAO zJ=L6*rOWERj*_};p(?{LBaA2w*D{62Pbb}zndMC4DyMCt0! zyU~rg{t4gie~B}3LVv}!K6WkhAyd-?^Rd=F`uh6UAaYSytoxGt)QHG5Yvd<1UYM== zaVIZtFP^GloLhBAowKQzbMja?u72eWhebTX;jdDetO~^-r0t(( zCMPBcC8x2ng^gC#!w`~BP9Bd(%CWtX5}GNi(w%cO$8K%7HU^L5$fg`WR^ z1*PT`?ZM7MaR?BMLr}B$TJ#$L?ci#l#>v0x76qdbGX}jSvS_B^*1Xq2RTM)rBtA)nJRo*`XAdrIpnu&slWTLZT=l zqB`eHWy`1~MX{;`uOn?{>b`g{W zo_L;ujzv)Ok1FUxZQ8muBq)dm&y{IX1l=XbQlu?FERCdb*u*Qd^wLxXw%Ip#c^5PW z2h*bu_v=~eu)B<3oWt8&JCWJQxv4M3Lxlj9YM*I`!i2F0hXS3U3?wjSuyA>Lq&@kk zAV2?F-T-s|C03#>R8NluVqwn6+3}SvpFd|@VID~sdwBT9c#{SX0e~n?{r58%6*z2# zY7WbXgbN-X&*wX!BC?a$Nm&Xj4S6JV%0o5mdT{T6 zub^!Lk*`6js)ojsggkti-%@c+!q^sF#c!TGIG%yQ51u_^ftCRJq^S)FPKE8y&P8?1 zojP@@xYUCUb~7g-yYk!XlCiR|zxy@n7HC`K{x&L42$`3~ue%{u!PNGk)w2wU*2mJx z(XXWnynK9|rZdNk`T8%Z_J4SISZUPv!j~6FbZ_lE{AIaxdg{7{o(cQH{^ygUka5db z=>F4-StVH|z}Bloft>i+WboM#o#4B0EUN zEThi?3c8mbnn?`q2zG4dI7D+*eeQ$GCYrUXn+`>uDr0O@7Em52QhwoMd}Fof!%*SS z8KYq%MxBHXYVz01U#TNYKWDmL`73i+;k>Ka*`<K>xPC8t`EZ1DS-cw! 
[... remainder of GIT binary patch data (base85) for the preceding image omitted ...]

diff --git a/docs/images/observability_overview_in_ocm.png b/docs/images/observability_overview_in_ocm.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9291fa4e795b3007657656aef3b47cfcf9b1f9a
GIT binary patch
literal 38201

[... base85-encoded binary patch data for docs/images/observability_overview_in_ocm.png omitted ...]
zRxk{&mS{??Bq#QV>WE1l4)ZNFuo%mk67Od-D8`hyd{@uXAly?(y4|>xvOpzhy_qr3 zYB2(r%ou$AQ^7WLW8`d}zp%YHM_==%;zrrSd&ky5SqBv?sS!5hnL*)?Xw`H@_&-`@ zZH#9AwK%Lc&0|v-Ii^wBW9v`fqaq$Hbf>Dr9&+*iY^_SHl1W<5jWtJtD`2g695G`ls2s9%3@&Wt3&`8eI#b#1X;_csfGf$+u%10xsB$kL8zLqnQ zAShX>%vxk_Sltkh$K2_}cr7JNsCYCg71_-4bWC2%&d$<3U0j`p-fLhnSw(&x-m!<= zJOr_WZhuT&eL^kBM?!DZr`-84(yE#a*|8Z4A z`aDrKKc8p%2&`mgQLco{;uCeFpjrtW7(cp!@o_*^PF6M(sxA~-+(R?^0XiV^i(Bjk z64B4DeQ|SPyK1%XV!bF$*VX8kFH8I8s7r3oT#Z#`usJK?0R&5V`@t{s^CfS_iwu7` z)JZI(fMO`0hKBYY+i^OwNy&gdy+ESK#uH)li}LjkvvM=S-z6oiN|Dxade8`pp3x{` z%24Qnyr=LCjNkS+A)s^eM30ww^6vTNu-2z}NfZeZ5t~^XKz%%DygO?qz?Jf0VmREG z!?d>6?SOhBOPw&SR=ZZ#`1dP`q`Q!zXM&`)sQMf1`memp&9A(V8*qOFplBzeJd*;x zW~I%uHc1fS*6DnZ3kT^jA14zl+k`D!8c0We}hzvm41jE63k)P{t$RGdO4w3v{{v)kB z_5WkcnF8J26e#yHwK(1$BX>5P+VooRITbNDIi8`R8sfjm-WNy`x1pkHc$J>`XW@nc_r9#Ux z+bo=!re{Gad(|me$+1<+x<|)*y;LX(PEjpZ?urJP&bbZVfJ;HRx6ea&ihhC3M zN95>*6id*#IM0fmn_kDq7!*t4wRvT#!;}H@;iX5MMi?U$cP}%t@MgTE%9fH2i2`oI zPNgX@X0w_<8IxL?H|BbEs0z6ZiPzFjo?pqR&8o~%PzuQ3(Omp*uE2C`(x)5_ZAXw5_szW7^Uh3k3`ljfuw}=XkGPV2L~u^yF=k!fR%rOyq)m!JK^9Kf_)RChue^*%a9& zHjgH34_x6;8;e)}v~zVxY_r@33r}L#y23N2;A%Ewr8vd>f&yv7Tn*_gVv3IGtDUQ# zPOXdK-sPCCe=q201iR-~1Y?RYmi+W=r|Ts;7+R}o=B8B! zI_FEf_g^en*C&jrDqgpwutY~@3tTTTR$LjWV+J@6-mUxsosGm{(k;QVX`^O9wr<)H z%nBOL8!8-iH!h08JrzInI96KS`=khNjIv8mW)FfsQhIlDq87X0pVW|YJ0jgDFJa7J zJY=}Q{V&IF*;C@9`;C+e`);+b4ael&xNlg6Lj>F=T7ZIly`}D-g`{O*b zZV})!w*gso6<0dZu{CU=Ic}+y@ zr`W2hW1si9t%X^Jjr5Xgu{!eQjKOh3Ak4o8eI?^$-UfWE2gO{ElOqmr382&M(%sZ! zf`QmBU`0~x&D!68U)BYrYkv7z!Lt%HHAh%qZ0~dpzzyf@HIV&Ti^lB*VfD0^lC|;- zepT_nTFs|j6`NZ-GY~hq>t6+*s$?d9_n23=M}hg=AAR1qlNG z?{sDVQ~lv$)&)Tb>0qQ0uN&GmGgD}nB6<7Xz0VS#@_(M&U^Lx0a>iquy~n=`tDPEg zz=$0?ru6)1>&@W_4^$;+y@+_xs28&ZvTGp`wMiNnsXmdEcB1BBg($sxG3R&DsBQ3g zQHU+Uy&Ts5Or%8a--UbrhtB=#Jx|XU#{_KvQAzw)GU@+*fBrw!2>;qP`ae3d{Qqb2 ke`v`5|7gQboyyxVM)OkR{$>kL3T}6kD;Af_F1kJXZxm?U!vFvP literal 0 HcmV?d00001 diff --git a/docs/scale-perf.md b/docs/scale-perf.md new file mode 100644 index 000000000..d1918459b --- /dev/null +++ b/docs/scale-perf.md @@ -0,0 +1,65 @@ + +# Performance and scalability + +This doc includes certain scalability and performance data of observability and you can use this information to help you plan your environment. +The resource consumption later is not for single pod/deployment, but for the OpenShift project where observability components are installed. It's a sum value for all observability components. + + +*Note:* Data is based on the results from a lab environment at the time of testing. +Your results might vary, depending on your environment, network speed, and changes to the product. + +## Test environment +In the test environment, hub and managed clusters are located in Amazon Web Services cloud platfrom and have the same topology/configuration as below: + + +Node | Flavor | vCPU | RAM (GiB) | Disk type | Disk size(GiB)/IOS | Count | Region +--- | ------ | ---- | --------- | --------- | ------------------ | ----- | ------ +Master | m5.4xlarge | 16 |64 | gp2 | 100 | 3 | sa-east-1 +Worker | m5.4xlarge | 16 |64 | gp2 | 100 | 3 | sa-east-1 + +For the observability deployment, it uses the "High" for availabilityConfig, which means for each kubernetes deployment has 2 instances and each statefulset has 3 instances. + +During the test, different number of managed clusters will be simulated to push metrics and each test will last for 24 hours. 
+Throughput for each managed cluster is as below:
+
+Pods | Interval (minutes) | Time series per min
+---- | ---------------- | -------------------
+400 | 1 | 83000
+
+## CPU
+During the test, CPU usage remains stable.
+
+Size | CPU Usage (millicores)
+---- | --------------
+10 clusters | 400
+20 clusters | 800
+
+## Memory
+Memory Usage RSS is taken from the metric container_memory_rss. It remains stable during the test.
+Memory Usage Working Set is taken from the metric container_memory_working_set_bytes. It increases over the course of the test; the values below are those observed after 24 hours.
+
+Size | Memory Usage RSS (GiB) | Memory Usage Working Set (GiB)
+---- | --------------------- | -----------------------------
+10 clusters | 9.84 | 4.83
+20 clusters | 13.10 | 8.76
+
+## Persistent volume for the thanos-receive component
+Except for thanos-receive, the other components do not use much disk, because metrics are stored in thanos-receive until the retention time is reached (the retention time of thanos-receive is 4 days).
+Disk usage increases along with the test. The data below shows the disk usage after 1 day, so the final disk usage should be multiplied by 4.
+
+Size | Disk Usage (GiB)
+---- | --------------
+10 clusters | 2
+20 clusters | 3
+
+## Network transfer
+During the test, network transfer remains stable.
+
+Size | Inbound Network Transfer (MiB per second) | Outbound Network Transfer (MiB per second)
+---- | ---------------------------------------- | -----------------------------------------
+10 clusters | 6.55 | 5.80
+20 clusters | 13.08 | 10.9
+
+## S3 storage
+Total usage on the S3 side increases along with the test. The metrics data is kept in S3 until the retention time is reached (the default retention time is 5 days).
+
+Size | Total Usage (GiB)
+---- | --------------
+10 clusters | 16.2
+20 clusters | 23.8
diff --git a/docs/setup-ceph-for-object-storage.md b/docs/setup-ceph-for-object-storage.md
new file mode 100644
index 000000000..67d59d074
--- /dev/null
+++ b/docs/setup-ceph-for-object-storage.md
@@ -0,0 +1,202 @@
+## Install Ceph deployment
+
+## Background
+[Ceph](https://docs.ceph.com/en/latest/start/intro/) can be installed on commodity hardware and can provide scalable Object Storage, Block Storage and Filesystem. For ACM Observability, we only focus on Object Storage. [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/) is an object storage interface built on top of librados to provide applications with a RESTful gateway to Ceph Storage Clusters. Ceph Object Storage supports two interfaces:
+- S3-compatible: Provides object storage functionality with an interface that is compatible with a large subset of the Amazon S3 RESTful API.
+
+- Swift-compatible: Provides object storage functionality with an interface that is compatible with a large subset of the OpenStack Swift API.
+
+For the ACM Observability feature, we are only interested in the S3-compatible interface.
+
+## Our Goal
+- We will lay down one way to create a Ceph Cluster on a Kubernetes cluster using the [Rook Operator](https://rook.io/docs/rook/v0.9/ceph-object.html).
+- We will then create an Object Store and a Bucket.
+- Finally, we will generate the configuration needed for ACM Observability to access the Object Store/Bucket via the S3 API.
+
+For this, the sample code we provide relies on this [Red Hat Blog](https://medium.com/@karansingh010/rook-ceph-deployment-on-openshift-4-2b34dfb6a442); a condensed sketch of the full command sequence follows below.
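+At a glance, the flow in the sections below boils down to applying the example manifests in order and then reading the generated S3 credentials back out. This is only a condensed sketch of the detailed sections that follow (same files and namespaces); refer to those sections for the expected output and verification steps.
+
+```
+cd examples/ceph/
+oc apply -f scc.yaml --validate=false   # security context constraints
+oc create -f operator.yaml              # Rook operator (rook-ceph-system namespace)
+oc create -f cluster.yaml               # Ceph cluster (rook-ceph namespace)
+oc create -f toolbox.yaml               # toolbox pod, used for verification and s3cmd
+oc create -f object.yaml                # object store (starts the RGW service)
+oc create -f object-user.yaml           # object store user (generates S3 credentials)
+```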
+
+`This sample relies on an older version of Rook, v0.9, and we have tested this on OpenShift 4.3 only.`
+
+### Create the security context constraints
+
+```
+$ cd examples/ceph/
+$ oc apply -f scc.yaml --validate=false
+```
+
+### Deploy the rook operator
+
+```
+$ oc create -f operator.yaml
+$ oc get pods -n rook-ceph-system
+NAME                                  READY   STATUS    RESTARTS   AGE
+rook-ceph-agent-4gxgw                 1/1     Running   0          18h
+rook-ceph-agent-lhrv8                 1/1     Running   0          18h
+rook-ceph-agent-mctzr                 1/1     Running   0          18h
+rook-ceph-agent-qt8mb                 1/1     Running   0          18h
+rook-ceph-agent-xt97w                 1/1     Running   0          18h
+rook-ceph-agent-z59pv                 1/1     Running   0          18h
+rook-ceph-operator-69c6dd8dd4-wvmnf   1/1     Running   0          18h
+rook-discover-9pwgc                   1/1     Running   0          18h
+rook-discover-bdffp                   1/1     Running   0          18h
+rook-discover-cqx7h                   1/1     Running   0          18h
+rook-discover-g4sl9                   1/1     Running   0          18h
+rook-discover-k74tg                   1/1     Running   0          18h
+rook-discover-wjfbz                   1/1     Running   0          18h
+```
+
+### Create rook cluster
+
+```
+$ oc create -f cluster.yaml
+$ oc get pods -n rook-ceph
+NAME                                          READY   STATUS      RESTARTS   AGE
+rook-ceph-mgr-a-7d787567d-mlptf               1/1     Running     0          17h
+rook-ceph-mon-a-745d4555bb-95rpp              1/1     Running     0          17h
+rook-ceph-mon-b-6ff54dbb64-wr6hq              1/1     Running     0          17h
+rook-ceph-mon-c-88b4f678-ws5l2                1/1     Running     0          17h
+rook-ceph-osd-0-dfd69fcd-tlgzb                1/1     Running     0          17h
+rook-ceph-osd-1-75569754d-5jwgx               1/1     Running     0          17h
+rook-ceph-osd-2-7fbbfc698f-md7t4              1/1     Running     0          17h
+rook-ceph-osd-3-856bdc77f8-958kx              1/1     Running     0          17h
+rook-ceph-osd-4-7ff6c4c8c9-rqcfp              1/1     Running     0          17h
+rook-ceph-osd-5-d7f4b95d4-qnklt               1/1     Running     0          17h
+rook-ceph-osd-prepare-ip-10-0-138-240-kxlg7   0/2     Completed   0          17h
+rook-ceph-osd-prepare-ip-10-0-141-127-h4jht   0/2     Completed   0          17h
+rook-ceph-osd-prepare-ip-10-0-146-129-2ntjk   0/2     Completed   0          17h
+rook-ceph-osd-prepare-ip-10-0-151-220-dtj78   0/2     Completed   0          17h
+rook-ceph-osd-prepare-ip-10-0-161-103-b4xvr   0/2     Completed   0          17h
+rook-ceph-osd-prepare-ip-10-0-170-222-rd4dv   0/2     Completed   0          17h
+rook-ceph-rgw-object-5b586bd796-8hqzj         1/1     Running     0          17h
+rook-ceph-tools                               1/1     Running     0          17h
+```
+
+### Verify your Ceph cluster
+
+```
+$ oc create -f toolbox.yaml
+$ oc -n rook-ceph exec -it rook-ceph-tools bash
+[root@rook-ceph-tools /]# ceph -s
+  cluster:
+    id:     fd0a79e6-2332-42e9-a57b-f32153c7ffed
+    health: HEALTH_OK
+
+  services:
+    mon: 3 daemons, quorum c,b,a
+    mgr: a(active)
+    osd: 6 osds: 6 up, 6 in
+    rgw: 1 daemon active
+
+  data:
+    pools:   6 pools, 600 pgs
+    objects: 339 objects, 223 MiB
+    usage:   265 GiB used, 452 GiB / 717 GiB avail
+    pgs:     600 active+clean
+```
+
+### Accessing S3 object storage
+Now we will create the object store using object.yaml, which starts the RGW service in the cluster with the S3 API, and then confirm that it is created.
+
+```
+$ oc create -f object.yaml
+$ oc get pod -l app=rook-ceph-rgw -n rook-ceph
+```
+
+Please wait for the RGW pod to be up before proceeding to the next step.
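+If you want to block until the gateway pod is actually ready, one option is `oc wait` with the same label selector used above. This is just a convenience sketch; the 300s timeout is an arbitrary choice.
+
+```
+# wait until the RGW pod reports Ready (assumes the app=rook-ceph-rgw label shown above)
+oc -n rook-ceph wait --for=condition=Ready pod -l app=rook-ceph-rgw --timeout=300s
+```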
+
+### Create an Object Store User
+Now we will create the object store user with object-user.yaml, which asks the RGW service in the cluster to generate a set of S3 credentials.
+
+```
+$ oc create -f object-user.yaml
+$ oc describe secret -n rook-ceph rook-ceph-object-user-object-object
+Name:         rook-ceph-object-user-object-object
+Namespace:    rook-ceph
+Labels:       app=rook-ceph-rgw
+              rook_cluster=rook-ceph
+              rook_object_store=object
+              user=object
+Annotations:
+
+Type:  kubernetes.io/rook
+
+Data
+====
+AccessKey:  20 bytes
+SecretKey:  40 bytes
+```
+
+- We can retrieve the S3 access key (`AWS_ACCESS_KEY_ID`) value as below:
+
+```
+$ ACCESS_KEY=$(oc -n rook-ceph get secret rook-ceph-object-user-object-object -o yaml | grep AccessKey | awk '{print $2}' | base64 --decode)
+$ echo $ACCESS_KEY
+CDDQ0YU1C4A77A0GE54S
+```
+
+- We can retrieve the S3 secret access key (`AWS_SECRET_ACCESS_KEY`) as below:
+
+```
+$ SECRET_KEY=$(oc -n rook-ceph get secret rook-ceph-object-user-object-object -o yaml | grep SecretKey | awk '{print $2}' | base64 --decode)
+$ echo $SECRET_KEY
+awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI
+```
+
+### Expose Object Store externally
+Create a route to expose the RGW service; this will be used as the `endpoint` below.
+
+```
+$ oc -n rook-ceph expose svc/rook-ceph-rgw-object
+$ oc -n rook-ceph get route | awk '{ print $2 }'
+HOST/PORT
+rook-ceph-rgw-object-rook-ceph.apps.acm-hub.dev05.red-chesterfield.com
+```
+
+### Create an S3 Bucket in the Store
+We will use an S3-compatible client to create a bucket in the object store. Luckily, this client is already provided in the toolbox installed earlier.
+
+Log into the toolbox container created earlier:
+
+```
+oc -n rook-ceph exec -it rook-ceph-tools bash
+```
+
+Set up environment variables as below:
+
+AWS_HOST: `rook-ceph-rgw-object:8081` as per this example.
+AWS_ENDPOINT: run `oc get service rook-ceph-rgw-object -n rook-ceph` and use `CLUSTER-IP:PORT`.
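+If you prefer not to copy the ClusterIP by hand, the same `CLUSTER-IP:PORT` value can be computed with a jsonpath query. Run this outside the toolbox (for example, in a second terminal); it assumes the S3 port is the first port listed on the service (8081 in this example).
+
+```
+# build CLUSTER-IP:PORT for the RGW service automatically
+AWS_ENDPOINT=$(oc -n rook-ceph get service rook-ceph-rgw-object \
+  -o jsonpath='{.spec.clusterIP}:{.spec.ports[0].port}')
+echo ${AWS_ENDPOINT}
+```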
+
+```
+[root@rook-ceph-tools /]# export AWS_ACCESS_KEY_ID=CDDQ0YU1C4A77A0GE54S
+[root@rook-ceph-tools /]# export AWS_SECRET_ACCESS_KEY=awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI
+[root@rook-ceph-tools /]# export AWS_HOST=rook-ceph-rgw-object:8081
+[root@rook-ceph-tools /]# export AWS_ENDPOINT=172.30.162.20:8081
+```
+
+Now create a bucket called `thanos-acm`:
+
+```
+[root@rook-ceph-tools /]# s3cmd mb --no-ssl --host=${AWS_HOST} --host-bucket= s3://thanos-acm
+Bucket 's3://thanos-acm/' created
+[root@rook-ceph-tools /]# s3cmd ls --no-ssl --host=${AWS_HOST}
+2020-09-14 23:42  s3://thanos-acm
+[root@rook-ceph-tools /]# exit
+exit
+```
+
+### Configuration for ACM Observability
+
+Your object storage configuration should be as follows:
+
+```
+type: s3
+config:
+  bucket: thanos-acm
+  endpoint: rook-ceph-rgw-object-rook-ceph.apps.acm-hub.dev05.red-chesterfield.com
+  insecure: true
+  access_key: CDDQ0YU1C4A77A0GE54S
+  secret_key: awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI
+```
+
+### Proceed with installation of ACM Observability
+Then you can follow these steps to deploy multicluster-observability-operator: https://github.com/stolostron/multicluster-observability-operator#install-this-operator-on-rhacm
\ No newline at end of file
diff --git a/docs/setup-ocs-for-object-storage.md b/docs/setup-ocs-for-object-storage.md
new file mode 100644
index 000000000..c81a02b11
--- /dev/null
+++ b/docs/setup-ocs-for-object-storage.md
@@ -0,0 +1,50 @@
+## Install the OpenShift Container Storage
+
+Follow this guide to install OpenShift Container Storage on your cluster: https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.4/html/deploying_openshift_container_storage/deploying-openshift-container-storage-on-openshift-container-platform_rhocs#installing-openshift-container-storage-operator-using-the-operator-hub_aws-vmware
+
+## Accessing object storage configuration
+
+Follow this guide to access the relevant endpoint, access key, and secret access key: https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.3/html/managing_openshift_container_storage/multicloud-object-gateway_rhocs#accessing-the-Multicloud-object-gateway-from-the-terminal_rhocs
+
+- access key (`AWS_ACCESS_KEY_ID` value)
+
+```
+$ ACCESS_KEY=$(kubectl get secret noobaa-admin -n openshift-storage -o json | jq -r '.data.AWS_ACCESS_KEY_ID|@base64d')
+$ echo $ACCESS_KEY
+GZnv6sSHjHQMM3UrYqsn
+```
+
+- secret access key (`AWS_SECRET_ACCESS_KEY` value)
+
+```
+$ SECRET_KEY=$(kubectl get secret noobaa-admin -n openshift-storage -o json | jq -r '.data.AWS_SECRET_ACCESS_KEY|@base64d')
+$ echo $SECRET_KEY
+WZIfOGYLx1DvlyKC9BII99VnSzDDJwymMZR3vAtL
+```
+
+- endpoint
+
+```
+$ kubectl get noobaa -n openshift-storage -o yaml
+
+...
+  serviceS3:
+    externalDNS:
+    - https://s3-openshift-storage.apps.acm-hub.dev05.red-chesterfield.com
+    - https://a916e5db6fa55485ba046a55c908147d-919567698.us-east-1.elb.amazonaws.com:443
+...
+``` + +Your object storage configuration should as following: + +``` +type: s3 +config: + bucket: first.bucket + endpoint: a916e5db6fa55485ba046a55c908147d-919567698.us-east-1.elb.amazonaws.com + insecure: true + access_key: GZnv6sSHjHQMM3UrYqsn + secret_key: WZIfOGYLx1DvlyKC9BII99VnSzDDJwymMZR3vAtL +``` + +Then you can be following these steps to deploy multicluster-observability-operator: https://github.com/stolostron/multicluster-observability-operator#install-this-operator-on-rhacm diff --git a/examples/alerts/custom_rules_invalid/kustomization.yaml b/examples/alerts/custom_rules_invalid/kustomization.yaml new file mode 100644 index 000000000..15e8e5114 --- /dev/null +++ b/examples/alerts/custom_rules_invalid/kustomization.yaml @@ -0,0 +1,4 @@ +commonLabels: + alertname: NodeOutOfMemory +resources: +- thanos-ruler-custom-rules-invalid.yaml diff --git a/examples/alerts/custom_rules_invalid/thanos-ruler-custom-rules-invalid.yaml b/examples/alerts/custom_rules_invalid/thanos-ruler-custom-rules-invalid.yaml new file mode 100644 index 000000000..8218cd9b3 --- /dev/null +++ b/examples/alerts/custom_rules_invalid/thanos-ruler-custom-rules-invalid.yaml @@ -0,0 +1,18 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: thanos-ruler-custom-rules + namespace: open-cluster-management-observability +data: + custom_rules.yaml: | + groups: + - name: node-health + rules: + - alert: NodeOutOfMemory + expr: instance:node_memory_utilisation:ratio * 100 < 0 + for: 1m + labels: + instance: "{{ $labels.instance }}" + cluster: "{{ $labels.cluster }}" + clusterID: "{{ $labels.clusterID }}" + severity: warning diff --git a/examples/alerts/custom_rules_valid/kustomization.yaml b/examples/alerts/custom_rules_valid/kustomization.yaml new file mode 100644 index 000000000..880b1cf50 --- /dev/null +++ b/examples/alerts/custom_rules_valid/kustomization.yaml @@ -0,0 +1,4 @@ +commonLabels: + alertname: NodeOutOfMemory +resources: +- thanos-ruler-custom-rules-valid.yaml diff --git a/examples/alerts/custom_rules_valid/thanos-ruler-custom-rules-valid.yaml b/examples/alerts/custom_rules_valid/thanos-ruler-custom-rules-valid.yaml new file mode 100644 index 000000000..dd314b8e1 --- /dev/null +++ b/examples/alerts/custom_rules_valid/thanos-ruler-custom-rules-valid.yaml @@ -0,0 +1,18 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: thanos-ruler-custom-rules + namespace: open-cluster-management-observability +data: + custom_rules.yaml: | + groups: + - name: node-health + rules: + - alert: NodeOutOfMemory + expr: instance:node_memory_utilisation:ratio * 100 > 0 + for: 1m + labels: + instance: "{{ $labels.instance }}" + cluster: "{{ $labels.cluster }}" + clusterID: "{{ $labels.clusterID }}" + severity: warning diff --git a/examples/ceph/cluster.yaml b/examples/ceph/cluster.yaml new file mode 100644 index 000000000..dfcaf09fc --- /dev/null +++ b/examples/ceph/cluster.yaml @@ -0,0 +1,264 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: rook-ceph +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-osd + namespace: rook-ceph +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-mgr + namespace: rook-ceph +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: [ "get", "list", "watch", "create", "update", "delete" ] +--- +# Aspects of ceph-mgr that require access to the system namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + 
name: rook-ceph-mgr-system + namespace: rook-ceph +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +# Aspects of ceph-mgr that operate within the cluster's namespace +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph +rules: +- apiGroups: + - "" + resources: + - pods + - services + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" +--- +# Allow the operator to create resources in this cluster's namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-cluster-mgmt + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-cluster-mgmt +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph-system +--- +# Allow the osd pods in this namespace to work with configmaps +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-osd + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-osd +subjects: +- kind: ServiceAccount + name: rook-ceph-osd + namespace: rook-ceph +--- +# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph +--- +# Allow the ceph mgr to access the rook system resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-system + namespace: rook-ceph-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-mgr-system +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph +--- +# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-cluster + namespace: rook-ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-mgr-cluster +subjects: +- kind: ServiceAccount + name: rook-ceph-mgr + namespace: rook-ceph +--- +apiVersion: ceph.rook.io/v1 +kind: CephCluster +metadata: + name: rook-ceph + namespace: rook-ceph +spec: + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v12 is luminous, v13 is mimic, and v14 is nautilus. + # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + image: ceph/ceph:v13.2.4-20190109 + # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported. + # After nautilus is released, Rook will be updated to support nautilus. + # Do not set to true in production. + allowUnsupported: false + # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended). 
+ # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. + dataDirHostPath: /var/lib/rook + # set the amount of mons to be started + mon: + count: 3 + allowMultiplePerNode: true + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # serve the dashboard using SSL + # ssl: true + network: + # toggle to use hostNetwork + hostNetwork: false + rbdMirroring: + # The number of daemons that will perform the rbd mirroring. + # rbd mirroring must be configured with "rbd mirror" from the rook toolbox. + workers: 0 + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. +# placement: +# all: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: role +# operator: In +# values: +# - storage-node +# podAffinity: +# podAntiAffinity: +# tolerations: +# - key: storage-node +# operator: Exists +# The above placement information can also be specified for mon, osd, and mgr components +# mon: +# osd: +# mgr: + resources: + mgr: + limits: + cpu: "500m" + memory: "1024Mi" + requests: + cpu: "500m" + memory: "1024Mi" +# The above example requests/limits can also be added to the mon and osd components +# mon: +# osd: + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: true + deviceFilter: + location: + config: + # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories. + # Set the storeType explicitly only if it is required not to use the default. + # storeType: bluestore + databaseSizeMB: "3072" # this value can be removed for environments with normal sized disks (100 GB or larger) + walSizeMB: "1024" + #journalSizeMB: "1024" # this value can be removed for environments with normal sized disks (20 GB or larger) + osdsPerDevice: "1" # this value can be overridden at the node or device level +# Cluster level list of directories to use for storage. These values will be set for all nodes that have no `directories` set. +# directories: +# - path: /rook/storage-dir +# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named +# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. 
+# nodes: +# - name: "172.17.4.101" +# directories: # specific directories to use for storage can be specified for each node +# - path: "/rook/storage-dir" +# resources: +# limits: +# cpu: "500m" +# memory: "1024Mi" +# requests: +# cpu: "500m" +# memory: "1024Mi" +# - name: "172.17.4.201" +# devices: # specific devices to use for storage can be specified for each node +# - name: "sdb" +# - name: "nvme01" # multiple osds can be created on high performance devices +# config: +# osdsPerDevice: "5" +# config: # configuration can be specified at the node level which overrides the cluster level config +# storeType: filestore +# - name: "172.17.4.301" +# deviceFilter: "^sd." diff --git a/examples/ceph/object-user.yaml b/examples/ceph/object-user.yaml new file mode 100644 index 000000000..24203d2a6 --- /dev/null +++ b/examples/ceph/object-user.yaml @@ -0,0 +1,8 @@ +apiVersion: ceph.rook.io/v1 +kind: CephObjectStoreUser +metadata: + name: object + namespace: rook-ceph +spec: + store: object + displayName: s3-user3 diff --git a/examples/ceph/object.yaml b/examples/ceph/object.yaml new file mode 100644 index 000000000..d4df730d4 --- /dev/null +++ b/examples/ceph/object.yaml @@ -0,0 +1,58 @@ +apiVersion: ceph.rook.io/v1 +kind: CephObjectStore +metadata: + name: object + namespace: rook-ceph +spec: + # The pool spec used to create the metadata pools + metadataPool: + failureDomain: host + replicated: + # Increase the replication size if you have more than one osd + size: 2 + # The pool spec used to create the data pool + dataPool: + failureDomain: host + replicated: + size: 2 + # If you have at least three osds, erasure coding can be specified + # erasureCoded: + # dataChunks: 2 + # codingChunks: 1 + # The gaeteway service configuration + gateway: + # type of the gateway (s3) + type: s3 + # A reference to the secret in the rook namespace where the ssl certificate is stored + sslCertificateRef: + # The port that RGW pods will listen on (http) + port: 8081 + # The port that RGW pods will listen on (https). An ssl certificate is required. + securePort: + # The number of pods in the rgw deployment (ignored if allNodes=true) + instances: 1 + # Whether the rgw pods should be deployed on all nodes as a daemonset + allNodes: false + # The affinity rules to apply to the rgw deployment or daemonset. 
+ placement: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - rgw-node + # tolerations: + # - key: rgw-node + # operator: Exists + # podAffinity: + # podAntiAffinity: + resources: + # The requests and limits set here, allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory + # limits: + # cpu: "500m" + # memory: "1024Mi" + # requests: + # cpu: "500m" + # memory: "1024Mi" diff --git a/examples/ceph/operator.yaml b/examples/ceph/operator.yaml new file mode 100644 index 000000000..f5ee45ead --- /dev/null +++ b/examples/ceph/operator.yaml @@ -0,0 +1,485 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: rook-ceph-system +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cephclusters.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephCluster + listKind: CephClusterList + plural: cephclusters + singular: cephcluster + scope: Namespaced + version: v1 + validation: + openAPIV3Schema: + properties: + spec: + properties: + cephVersion: + properties: + allowUnsupported: + type: boolean + image: + type: string + name: + pattern: ^(luminous|mimic|nautilus)$ + type: string + dashboard: + properties: + enabled: + type: boolean + urlPrefix: + type: string + port: + type: integer + dataDirHostPath: + pattern: ^/(\S+) + type: string + mon: + properties: + allowMultiplePerNode: + type: boolean + count: + maximum: 9 + minimum: 1 + type: integer + required: + - count + network: + properties: + hostNetwork: + type: boolean + storage: + properties: + nodes: + items: {} + type: array + useAllDevices: {} + useAllNodes: + type: boolean + required: + - mon + additionalPrinterColumns: + - name: DataDirHostPath + type: string + description: Directory used on the K8s nodes + JSONPath: .spec.dataDirHostPath + - name: MonCount + type: string + description: Number of MONs + JSONPath: .spec.mon.count + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + - name: State + type: string + description: Current State + JSONPath: .status.state +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cephfilesystems.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephFilesystem + listKind: CephFilesystemList + plural: cephfilesystems + singular: cephfilesystem + scope: Namespaced + version: v1 + additionalPrinterColumns: + - name: MdsCount + type: string + description: Number of MDSs + JSONPath: .spec.metadataServer.activeCount + - name: Age + type: date + JSONPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cephobjectstores.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStore + listKind: CephObjectStoreList + plural: cephobjectstores + singular: cephobjectstore + scope: Namespaced + version: v1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cephobjectstoreusers.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephObjectStoreUser + listKind: CephObjectStoreUserList + plural: cephobjectstoreusers + singular: cephobjectstoreuser + scope: Namespaced + version: v1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: cephblockpools.ceph.rook.io +spec: + group: ceph.rook.io + names: + kind: CephBlockPool + listKind: CephBlockPoolList + plural: 
cephblockpools + singular: cephblockpool + scope: Namespaced + version: v1 +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: volumes.rook.io +spec: + group: rook.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + singular: volume + shortNames: + - rv + scope: Namespaced + version: v1alpha2 +--- +# The cluster role for managing all the cluster-specific resources in a namespace +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: rook-ceph-cluster-mgmt + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - secrets + - pods + - pods/log + - services + - configmaps + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - extensions + resources: + - deployments + - daemonsets + - replicasets + verbs: + - get + - list + - watch + - create + - update + - delete +--- +# The role for the operator to manage resources in the system namespace +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: rook-ceph-system + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - pods + - configmaps + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - extensions + resources: + - daemonsets + verbs: + - get + - list + - watch + - create + - update + - delete +--- +# The cluster role for managing the Rook CRDs +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: rook-ceph-global + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + # Pod access is needed for fencing + - pods + # Node access is needed for determining nodes where mons should run + - nodes + - nodes/proxy + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + # PVs and PVCs are managed by the Rook provisioner + - persistentvolumes + - persistentvolumeclaims + verbs: + - get + - list + - watch + - patch + - create + - update + - delete +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - get + - list + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - ceph.rook.io + resources: + - "*" + verbs: + - "*" +- apiGroups: + - rook.io + resources: + - "*" + verbs: + - "*" +--- +# Aspects of ceph-mgr that require cluster-wide access +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-mgr-cluster + labels: + operator: rook + storage-backend: ceph +rules: +- apiGroups: + - "" + resources: + - configmaps + - nodes + - nodes/proxy + verbs: + - get + - list + - watch +--- +# The rook system service account used by the operator, agent, and discovery pods +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rook-ceph-system + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +--- +# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-system + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rook-ceph-system +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph-system +--- +# Grant the rook system 
daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: rook-ceph-global + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: rook-ceph-global +subjects: +- kind: ServiceAccount + name: rook-ceph-system + namespace: rook-ceph-system +--- +# The deployment for the rook operator +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: rook-ceph-operator + namespace: rook-ceph-system + labels: + operator: rook + storage-backend: ceph +spec: + replicas: 1 + template: + metadata: + labels: + app: rook-ceph-operator + spec: + serviceAccountName: rook-ceph-system + containers: + - name: rook-ceph-operator + image: rook/ceph:v0.9.2 + args: ["ceph", "operator"] + volumeMounts: + - mountPath: /var/lib/rook + name: rook-config + - mountPath: /etc/ceph + name: default-config-dir + env: + # To disable RBAC, uncomment the following: + # - name: RBAC_ENABLED + # value: "false" + # Rook Agent toleration. Will tolerate all taints with all keys. + # Choose between NoSchedule, PreferNoSchedule and NoExecute: + # - name: AGENT_TOLERATION + # value: "NoSchedule" + # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate + # - name: AGENT_TOLERATION_KEY + # value: "" + # (Optional) Rook Agent mount security mode. Can by `Any` or `Restricted`. + # `Any` uses Ceph admin credentials by default/fallback. + # For using `Restricted` you must have a Ceph secret in each namespace storage should be consumed from and + # set `mountUser` to the Ceph user, `mountSecret` to the Kubernetes secret name. + # to the namespace in which the `mountSecret` Kubernetes secret namespace. + # - name: AGENT_MOUNT_SECURITY_MODE + # value: "Any" + # Set the path where the Rook agent can find the flex volumes + - name: FLEXVOLUME_DIR_PATH + value: "/etc/kubernetes/kubelet-plugins/volume/exec" + # Set the path where kernel modules can be found + # - name: LIB_MODULES_DIR_PATH + # value: "" + # Mount any extra directories into the agent container + # - name: AGENT_MOUNTS + # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" + # Rook Discover toleration. Will tolerate all taints with all keys. + # Choose between NoSchedule, PreferNoSchedule and NoExecute: + # - name: DISCOVER_TOLERATION + # value: "NoSchedule" + # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate + # - name: DISCOVER_TOLERATION_KEY + # value: "" + # Allow rook to create multiple file systems. Note: This is considered + # an experimental feature in Ceph as described at + # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster + # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027 + - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS + value: "false" + # The logging level for the operator: INFO | DEBUG + - name: ROOK_LOG_LEVEL + value: "INFO" + # The interval to check if every mon is in the quorum. + - name: ROOK_MON_HEALTHCHECK_INTERVAL + value: "45s" + # The duration to wait before trying to failover or remove/replace the + # current mon with a new mon (useful for compensating flapping network). + - name: ROOK_MON_OUT_TIMEOUT + value: "300s" + # The duration between discovering devices in the rook-discover daemonset. 
+ - name: ROOK_DISCOVER_DEVICES_INTERVAL + value: "60m" + # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. + # This is necessary to workaround the anyuid issues when running on OpenShift. + # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 + - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED + value: "true" + # In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins). + # Disable it here if you have similiar issues. + # For more details see https://github.com/rook/rook/issues/2417 + - name: ROOK_ENABLE_SELINUX_RELABELING + value: "true" + # In large volumes it will take some time to chown all the files. Disable it here if you have performance issues. + # For more details see https://github.com/rook/rook/issues/2254 + - name: ROOK_ENABLE_FSGROUP + value: "true" + # The name of the node to pass with the downward API + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # The pod name to pass with the downward API + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + # The pod namespace to pass with the downward API + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: rook-config + emptyDir: {} + - name: default-config-dir + emptyDir: {} diff --git a/examples/ceph/scc.yaml b/examples/ceph/scc.yaml new file mode 100644 index 000000000..5ea72e090 --- /dev/null +++ b/examples/ceph/scc.yaml @@ -0,0 +1,44 @@ +kind: SecurityContextConstraints +# older versions of openshift have "apiVersion: v1" +apiVersion: security.openshift.io/v1 +metadata: + name: rook-ceph +allowPrivilegedContainer: true +allowHostNetwork: true +allowHostDirVolumePlugin: true +priority: +allowedCapabilities: [] +allowHostPorts: false +allowHostPID: true +allowHostIPC: false +readOnlyRootFilesystem: false +requiredDropCapabilities: [] +defaultAddCapabilities: [] +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +fsGroup: + type: MustRunAs +supplementalGroups: + type: RunAsAny +allowedFlexVolumes: + - driver: "ceph.rook.io/rook" + - driver: "ceph.rook.io/rook-ceph" +volumes: + - configMap + - downwardAPI + - emptyDir + - flexVolume + - hostPath + - persistentVolumeClaim + - projected + - secret +users: + # A user needs to be added for each rook service account. + # This assumes running in the default sample "rook-ceph" and "rook-ceph-system" namespaces. + # If other namespaces or service accounts are configured, they need to be updated here. 
+ - system:serviceaccount:rook-ceph-system:rook-ceph-system + - system:serviceaccount:rook-ceph:default + - system:serviceaccount:rook-ceph:rook-ceph-mgr + - system:serviceaccount:rook-ceph:rook-ceph-osd diff --git a/examples/ceph/toolbox.yaml b/examples/ceph/toolbox.yaml new file mode 100644 index 000000000..db12f0f68 --- /dev/null +++ b/examples/ceph/toolbox.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Pod +metadata: + name: rook-ceph-tools + namespace: rook-ceph +spec: + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: rook-ceph-tools + image: rook/ceph-toolbox:master + imagePullPolicy: IfNotPresent + env: + - name: ROOK_ADMIN_SECRET + valueFrom: + secretKeyRef: + name: rook-ceph-mon + key: admin-secret + securityContext: + privileged: true + volumeMounts: + - mountPath: /dev + name: dev + - mountPath: /sys/bus + name: sysbus + - mountPath: /lib/modules + name: libmodules + - name: mon-endpoint-volume + mountPath: /etc/rook + hostNetwork: false + volumes: + - name: dev + hostPath: + path: /dev + - name: sysbus + hostPath: + path: /sys/bus + - name: libmodules + hostPath: + path: /lib/modules + - name: mon-endpoint-volume + configMap: + name: rook-ceph-mon-endpoints + items: + - key: data + path: mon-endpoints diff --git a/examples/dashboards/kubernetes_pvc_dashboard/custom-kubernetes-pvc-dashboard.yaml b/examples/dashboards/kubernetes_pvc_dashboard/custom-kubernetes-pvc-dashboard.yaml new file mode 100644 index 000000000..a7341b10e --- /dev/null +++ b/examples/dashboards/kubernetes_pvc_dashboard/custom-kubernetes-pvc-dashboard.yaml @@ -0,0 +1,506 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubernetes-pvc-dashboard + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: "true" +data: + sample.yaml: |- + { + "annotations": { + "list": [ + { + "$$hashKey": "object:93", + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 10, + "iteration": 1533213184361, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 6, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum((((kubelet_volume_stats_capacity_bytes-kubelet_volume_stats_available_bytes) / kubelet_volume_stats_capacity_bytes)*100)>bool $capacity)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "1,1", + "title": "Number of PVCs > $capacity% Used", + "type": "singlestat", + "valueFontSize": "80%", 
+ "valueMaps": [ + { + "op": "=", + "text": "No PVCs using > $capacity% capacity", + "value": "0" + } + ], + "valueName": "avg" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 9, + "x": 6, + "y": 0 + }, + "hideTimeOverride": false, + "id": 15, + "links": [], + "options": {}, + "pageSize": null, + "repeat": null, + "repeatDirection": "h", + "scroll": true, + "showHeader": true, + "sort": { + "col": 12, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "PVC", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "persistentvolumeclaim", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "% Used", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 1, + "pattern": "Value", + "thresholds": [], + "type": "number", + "unit": "percent" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "((kubelet_volume_stats_capacity_bytes-kubelet_volume_stats_available_bytes) / kubelet_volume_stats_capacity_bytes * 100)>$capacity", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "title": "List of PVCs > $capacity% used", + "transform": "table", + "type": "table" + }, + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 6 + }, + "id": 13, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(kube_persistentvolumeclaim_status_phase{phase=\"Bound\"}!=bool 1)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "1,1", + "title": "Number of Unbound PVCs", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "No unbound PVCs", + "value": "0" + } + ], + "valueName": "avg" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 9, + "x": 6, + "y": 6 + }, + "id": 17, + "links": [], + "options": {}, + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 11, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + 
"pattern": "Time", + "type": "hidden" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "persistentvolumeclaim", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "kube_persistentvolumeclaim_status_phase{phase=\"Bound\", namespace=\"open-cluster-management-observability\"}!=1", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "title": "Unbound PVCs", + "transform": "table", + "type": "table" + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "persistent volumes", + "kubernetes" + ], + "templating": { + "list": [ + { + "current": { + "text": "Observatorium", + "value": "Observatorium" + }, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "open-cluster-management-observability", + "value": "open-cluster-management-observability" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(kubelet_volume_stats_capacity_bytes, namespace)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "tags": [], + "text": "50", + "value": "50" + }, + "hide": 0, + "includeAll": false, + "label": "Capacity", + "multi": false, + "name": "capacity", + "options": [ + { + "$$hashKey": "object:652", + "selected": true, + "text": "50", + "value": "50" + }, + { + "$$hashKey": "object:653", + "selected": false, + "text": "60", + "value": "60" + }, + { + "$$hashKey": "object:654", + "selected": false, + "text": "70", + "value": "70" + }, + { + "$$hashKey": "object:655", + "selected": false, + "text": "80", + "value": "80" + }, + { + "$$hashKey": "object:656", + "selected": false, + "text": "90", + "value": "90" + }, + { + "$$hashKey": "object:657", + "selected": false, + "text": "95", + "value": "95" + } + ], + "query": "50,60,70,80,90,95", + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes Persistent Volumes", + "uid": "KweC0j4mz", + "version": 1898 + } diff --git a/examples/dashboards/kubernetes_pvc_dashboard/custom-metrics-allowlist.yaml b/examples/dashboards/kubernetes_pvc_dashboard/custom-metrics-allowlist.yaml new file mode 100644 index 
000000000..f30af1c98 --- /dev/null +++ b/examples/dashboards/kubernetes_pvc_dashboard/custom-metrics-allowlist.yaml @@ -0,0 +1,9 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: observability-metrics-custom-allowlist + namespace: open-cluster-management-observability +data: + metrics_list.yaml: | + names: + - kube_persistentvolumeclaim_status_phase diff --git a/examples/dashboards/kubernetes_pvc_dashboard/custom-number-of-clusters.yaml b/examples/dashboards/kubernetes_pvc_dashboard/custom-number-of-clusters.yaml new file mode 100644 index 000000000..0dc451354 --- /dev/null +++ b/examples/dashboards/kubernetes_pvc_dashboard/custom-number-of-clusters.yaml @@ -0,0 +1,184 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: number-of-clusters-dashboard + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: "true" +data: + sample.yaml: |- + { + "annotations": { + "list": [ + { + "$$hashKey": "object:93", + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 10, + "iteration": 1533213184361, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 18, + "x": 0, + "y": 0 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(count(cluster_infrastructure_provider{cluster=~\".*\"}) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of Managed Clusters", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 16, + "style": "dark", + "tags": [ + "managed cluster", + "acm" + ], + "templating": { + "list": [ + { + "current": { + "text": "Observatorium", + "value": "Observatorium" + }, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "The Number of Managed 
Cluster", + "uid": "KweCsj4mz", + "version": 1111 + } diff --git a/examples/dashboards/kubernetes_pvc_dashboard/dashboard_subscription.yaml b/examples/dashboards/kubernetes_pvc_dashboard/dashboard_subscription.yaml new file mode 100644 index 000000000..4ca1bfe8a --- /dev/null +++ b/examples/dashboards/kubernetes_pvc_dashboard/dashboard_subscription.yaml @@ -0,0 +1,25 @@ +apiVersion: apps.open-cluster-management.io/v1 +kind: Channel +metadata: + name: get-custom-dashboards + namespace: open-cluster-management-observability +spec: + type: GitHub + pathname: https://github.com/stolostron/observability-gitops.git + secretRef: + name: github-credentials +--- + +apiVersion: apps.open-cluster-management.io/v1 +kind: Subscription +metadata: + name: dashboard-subscription + namespace: open-cluster-management-observability + annotations: + apps.open-cluster-management.io/git-branch: main + apps.open-cluster-management.io/git-path: dashboards/kubernetes_pvc_dashboard + apps.open-cluster-management.io/reconcile-option: replace +spec: + channel: open-cluster-management-observability/get-custom-dashboards + placement: + local: true diff --git a/examples/dashboards/kubernetes_pvc_dashboard/kustomization.yaml b/examples/dashboards/kubernetes_pvc_dashboard/kustomization.yaml new file mode 100644 index 000000000..90c1b59c0 --- /dev/null +++ b/examples/dashboards/kubernetes_pvc_dashboard/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- custom-kubernetes-pvc-dashboard.yaml +- custom-metrics-allowlist.yaml +- custom-number-of-clusters.yaml diff --git a/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml b/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml new file mode 100644 index 000000000..a2753e32b --- /dev/null +++ b/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml @@ -0,0 +1,18 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: sample-dashboard + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: "true" +data: + sample.yaml: | + { + "id": "e2e", + "uid": null, + "title": "Sample Dashboard for E2E", + "tags": [ "test" ], + "timezone": "browser", + "schemaVersion": 16, + "version": 1 + } diff --git a/examples/dashboards/sample_custom_dashboard/kustomization.yaml b/examples/dashboards/sample_custom_dashboard/kustomization.yaml new file mode 100644 index 000000000..e349cf8b1 --- /dev/null +++ b/examples/dashboards/sample_custom_dashboard/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- custom-sample-dashboard.yaml diff --git a/examples/dashboards/update_sample_custom_dashboard/kustomization.yaml b/examples/dashboards/update_sample_custom_dashboard/kustomization.yaml new file mode 100644 index 000000000..81d35bd93 --- /dev/null +++ b/examples/dashboards/update_sample_custom_dashboard/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- update-custom-sample-dashboard.yaml diff --git a/examples/dashboards/update_sample_custom_dashboard/update-custom-sample-dashboard.yaml b/examples/dashboards/update_sample_custom_dashboard/update-custom-sample-dashboard.yaml new file mode 100644 index 000000000..6e73cb658 --- /dev/null +++ b/examples/dashboards/update_sample_custom_dashboard/update-custom-sample-dashboard.yaml @@ -0,0 +1,18 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: sample-dashboard + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: "true" +data: + sample.yaml: | + { + "id": "e2e", + "uid": 
null, + "title": "Update Sample Dashboard for E2E", + "tags": [ "test" ], + "timezone": "browser", + "schemaVersion": 16, + "version": 1 + } diff --git a/examples/mco/e2e/v1beta1/kustomization.yaml b/examples/mco/e2e/v1beta1/kustomization.yaml new file mode 100644 index 000000000..f0c8a4650 --- /dev/null +++ b/examples/mco/e2e/v1beta1/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- observability.yaml diff --git a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml new file mode 100644 index 000000000..02f5f698e --- /dev/null +++ b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml @@ -0,0 +1,28 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability +spec: + advanced: + retentionConfig: + retentionResolution1h: 30d + retentionResolution5m: 14d + retentionResolutionRaw: 5d + enableDownsampling: false + imagePullPolicy: Always + imagePullSecret: multiclusterhub-operator-pull-secret + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + interval: 300 + storageConfig: + alertmanagerStorageSize: 1Gi + compactStorageSize: 1Gi + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + receiveStorageSize: 1Gi + ruleStorageSize: 1Gi + storageClass: gp2 + storeStorageSize: 1Gi diff --git a/examples/mco/e2e/v1beta1/observability.yaml b/examples/mco/e2e/v1beta1/observability.yaml new file mode 100644 index 000000000..ee59f4ce4 --- /dev/null +++ b/examples/mco/e2e/v1beta1/observability.yaml @@ -0,0 +1,18 @@ +apiVersion: observability.open-cluster-management.io/v1beta1 +kind: MultiClusterObservability +metadata: + name: observability + annotations: +spec: + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: {} + retentionResolutionRaw: 5d + retentionResolution5m: 14d + retentionResolution1h: 30d + storageConfigObject: + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + statefulSetSize: 1Gi + statefulSetStorageClass: gp2 diff --git a/examples/mco/e2e/v1beta2/kustomization.yaml b/examples/mco/e2e/v1beta2/kustomization.yaml new file mode 100644 index 000000000..f0c8a4650 --- /dev/null +++ b/examples/mco/e2e/v1beta2/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- observability.yaml diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml new file mode 100644 index 000000000..7497d4c36 --- /dev/null +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -0,0 +1,114 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability + annotations: +spec: + advanced: + retentionConfig: + blockDuration: 3h + cleanupInterval: 6m + deleteDelay: 50h + retentionInLocal: 5d + retentionResolution1h: 31d + retentionResolution5m: 15d + retentionResolutionRaw: 6d + observatoriumAPI: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + queryFrontend: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + query: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + compact: + resources: + limits: + cpu: 1 + memory: 2Gi + receive: + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 + rule: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 1 + store: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + storeMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 2 + memoryLimitMb: 2048 
+ maxItemSize: 2m + connectionLimit: 2048 + queryFrontendMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + grafana: + replicas: 3 + resources: + limits: + cpu: 1 + memory: 1Gi + alertmanager: + replicas: 2 + resources: + limits: + cpu: 100m + memory: 400Mi + rbacQueryProxy: + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + interval: 30 + resources: + limits: + cpu: 200m + memory: 700Mi + requests: + cpu: 10m + memory: 100Mi + storageConfig: + alertmanagerStorageSize: 1Gi + compactStorageSize: 1Gi + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + receiveStorageSize: 1Gi + ruleStorageSize: 1Gi + storageClass: gp2 + storeStorageSize: 1Gi diff --git a/examples/metrics/allowlist/custom-metrics-allowlist.yaml b/examples/metrics/allowlist/custom-metrics-allowlist.yaml new file mode 100644 index 000000000..16e8513ea --- /dev/null +++ b/examples/metrics/allowlist/custom-metrics-allowlist.yaml @@ -0,0 +1,13 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: observability-metrics-custom-allowlist + namespace: open-cluster-management-observability +data: + metrics_list.yaml: | + names: + - node_memory_Active_bytes # this custom metric is being collected from your managed clusters + - -cluster_version_payload # this default metric is not being collected from your managed clusters + - -instance:node_num_cpu:sum # this default metric is not being collected from your managed clusters + matches: + - -__name__="go_goroutines",job="apiserver" # this default metric is not being collected from your managed clusters diff --git a/examples/metrics/allowlist/kustomization.yaml b/examples/metrics/allowlist/kustomization.yaml new file mode 100644 index 000000000..4f9bafee2 --- /dev/null +++ b/examples/metrics/allowlist/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- custom-metrics-allowlist.yaml diff --git a/examples/minio/kustomization.yaml b/examples/minio/kustomization.yaml new file mode 100644 index 000000000..000a44919 --- /dev/null +++ b/examples/minio/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- minio-deployment.yaml +- minio-pvc.yaml +- minio-secret.yaml +- minio-service.yaml diff --git a/examples/minio/minio-deployment.yaml b/examples/minio/minio-deployment.yaml new file mode 100644 index 000000000..6b6302c52 --- /dev/null +++ b/examples/minio/minio-deployment.yaml @@ -0,0 +1,41 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio + namespace: open-cluster-management-observability + labels: + app.kubernetes.io/name: minio +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: minio + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: minio + spec: + containers: + - command: + - /bin/sh + - -c + - mkdir -p /storage/thanos && /usr/bin/minio server /storage + env: + - name: MINIO_ACCESS_KEY + value: minio + - name: MINIO_SECRET_KEY + value: minio123 + image: quay.io/minio/minio:RELEASE.2021-08-25T00-41-18Z + name: minio + ports: + - containerPort: 9000 + protocol: TCP + volumeMounts: + - mountPath: /storage + name: storage + volumes: + - name: storage + persistentVolumeClaim: + claimName: minio diff --git a/examples/minio/minio-pvc.yaml b/examples/minio/minio-pvc.yaml new file mode 100644 index 000000000..43ed946a7 --- /dev/null +++ b/examples/minio/minio-pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: 
PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/name: minio + name: minio + namespace: open-cluster-management-observability +spec: + storageClassName: gp2 + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" diff --git a/examples/minio/minio-secret.yaml b/examples/minio/minio-secret.yaml new file mode 100644 index 000000000..584060d82 --- /dev/null +++ b/examples/minio/minio-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + thanos.yaml: dHlwZTogczMKY29uZmlnOgogIGJ1Y2tldDogInRoYW5vcyIKICBlbmRwb2ludDogIm1pbmlvOjkwMDAiCiAgaW5zZWN1cmU6IHRydWUKICBhY2Nlc3Nfa2V5OiAibWluaW8iCiAgc2VjcmV0X2tleTogIm1pbmlvMTIzIg== +kind: Secret +metadata: + name: thanos-object-storage + namespace: open-cluster-management-observability +type: Opaque diff --git a/examples/minio/minio-service.yaml b/examples/minio/minio-service.yaml new file mode 100644 index 000000000..655a2ee84 --- /dev/null +++ b/examples/minio/minio-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: minio + namespace: open-cluster-management-observability +spec: + ports: + - port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app.kubernetes.io/name: minio + type: ClusterIP diff --git a/examples/policy/kustomization.yaml b/examples/policy/kustomization.yaml new file mode 100644 index 000000000..a462c4054 --- /dev/null +++ b/examples/policy/kustomization.yaml @@ -0,0 +1,3 @@ +resources: +- limitRange.yaml +- resourceQuota.yaml diff --git a/examples/policy/limitRange.yaml b/examples/policy/limitRange.yaml new file mode 100644 index 000000000..a8bdf33f7 --- /dev/null +++ b/examples/policy/limitRange.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-range + namespace: open-cluster-management-observability +spec: + limits: + - max: + cpu: "5.25" + memory: "12Gi" + defaultRequest: + cpu: "10m" + memory: "50Mi" + type: Container diff --git a/examples/policy/resourceQuota.yaml b/examples/policy/resourceQuota.yaml new file mode 100644 index 000000000..1c439b434 --- /dev/null +++ b/examples/policy/resourceQuota.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: resource-quota + namespace: open-cluster-management-observability +spec: + hard: + cpu: "5.25" + memory: "12Gi" diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..fcfd5da77 --- /dev/null +++ b/go.mod @@ -0,0 +1,198 @@ +module github.com/stolostron/multicluster-observability-operator + +go 1.17 + +require ( + github.com/IBM/controller-filtered-cache v0.3.3 + github.com/cenkalti/backoff v2.2.1+incompatible + github.com/cloudflare/cfssl v1.6.0 + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 + github.com/go-kit/kit v0.11.0 + github.com/go-logr/logr v0.4.0 + github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.5.2 + github.com/golang/snappy v0.0.4 + github.com/hashicorp/go-version v1.3.0 + github.com/oklog/run v1.1.0 + github.com/onsi/ginkgo v1.16.4 + github.com/onsi/gomega v1.15.0 + github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible + github.com/openshift/client-go v0.0.0-20210916133943-9acee1a0fb83 + github.com/openshift/cluster-monitoring-operator v0.1.1-0.20210611103744-7168290cd660 + github.com/pkg/errors v0.9.1 + github.com/prometheus-community/prom-label-proxy v0.3.0 + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.1 + github.com/prometheus-operator/prometheus-operator/pkg/client v0.47.1 + github.com/prometheus/alertmanager v0.22.2 + github.com/prometheus/client_golang v1.11.0 
+ github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.30.0 + github.com/prometheus/prometheus v2.3.2+incompatible + github.com/spf13/cobra v1.2.1 + github.com/spf13/pflag v1.0.5 + github.com/stolostron/multicloud-operators-foundation v0.0.0-20220112005209-c7d642306a99 + github.com/stolostron/multiclusterhub-operator v0.0.0-20220111203209-4882a2b93f0f + github.com/stolostron/observatorium-operator v0.0.0-20220112075017-39be85036799 + github.com/stretchr/testify v1.7.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.22.1 + k8s.io/apiextensions-apiserver v0.22.1 + k8s.io/apimachinery v0.22.1 + k8s.io/client-go v13.0.0+incompatible + k8s.io/klog v1.0.0 + k8s.io/kubectl v0.21.2 + open-cluster-management.io/addon-framework v0.0.0-20211014025435-1f42884cdd53 + open-cluster-management.io/api v0.0.0-20210916013819-2e58cdb938f9 + sigs.k8s.io/controller-runtime v0.9.7 + sigs.k8s.io/kube-storage-version-migrator v0.0.3 + sigs.k8s.io/kustomize/api v0.8.8 + sigs.k8s.io/kustomize/v3 v3.3.1 + sigs.k8s.io/yaml v1.2.0 +) + +require ( + cloud.google.com/go v0.87.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.19 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.14 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 // indirect + github.com/armon/go-metrics v0.3.10 // indirect + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/brancz/locutus v0.0.0-20210511124350-7a84f4d1bcb3 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dennwc/varint v1.0.0 // indirect + github.com/edsrzf/mmap-go v1.0.0 // indirect + github.com/efficientgo/tools/core v0.0.0-20210201224146-3d78f4d30648 // indirect + github.com/emicklei/go-restful v2.14.2+incompatible // indirect + github.com/evanphx/json-patch v4.11.0+incompatible // indirect + github.com/fatih/color v1.10.0 // indirect + github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-errors/errors v1.0.1 // indirect + github.com/go-kit/log v0.1.0 // indirect + github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-logr/zapr v0.4.0 // indirect + github.com/go-openapi/analysis v0.20.0 // indirect + github.com/go-openapi/errors v0.20.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/loads v0.20.2 // indirect + github.com/go-openapi/runtime v0.19.28 // indirect + github.com/go-openapi/spec v0.20.3 // indirect + github.com/go-openapi/strfmt v0.20.1 // indirect + github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-openapi/validate v0.20.2 // indirect + github.com/go-stack/stack v1.8.0 // indirect + github.com/gobuffalo/flect v0.2.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/certificate-transparency-go v1.0.21 // indirect + github.com/google/go-cmp v0.5.6 // indirect + 
github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.2.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect + github.com/hashicorp/consul/api v1.10.0 // indirect + github.com/hashicorp/go-hclog v0.14.1 // indirect + github.com/hashicorp/go-immutable-radix v1.3.0 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.11 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect + github.com/openshift/library-go v0.0.0-20210916194400-ae21aab32431 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/robfig/cron v1.2.0 // indirect + github.com/stretchr/objx v0.3.0 // indirect + github.com/uber/jaeger-client-go v2.29.1+incompatible // indirect + github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/weppos/publicsuffix-go v0.13.0 // indirect + github.com/xlab/treeprint v1.1.0 // indirect + github.com/zmap/zcrypto v0.0.0-20201128221613-3719af1573cf // indirect + github.com/zmap/zlint/v3 v3.0.0 // indirect + go.mongodb.org/mongo-driver v1.5.1 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.10 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.19.0 // indirect + golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 // indirect + golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect + golang.org/x/text v0.3.6 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + golang.org/x/tools v0.1.5 // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + k8s.io/apiserver v0.22.1 // indirect + k8s.io/component-base v0.22.1 // indirect + k8s.io/klog/v2 v2.10.0 // indirect + k8s.io/kube-aggregator v0.22.1 // indirect + k8s.io/kube-openapi v0.0.0-20210929172449-94abcedd1aa4 // 
indirect + k8s.io/utils v0.0.0-20210802155522-efc7438f0176 // indirect + sigs.k8s.io/kustomize/kyaml v0.10.17 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect +) + +replace ( + github.com/go-openapi/analysis => github.com/go-openapi/analysis v0.19.5 + github.com/go-openapi/loads => github.com/go-openapi/loads v0.19.5 + github.com/go-openapi/spec => github.com/go-openapi/spec v0.19.5 + github.com/hashicorp/consul => github.com/hashicorp/consul v1.10.6 + github.com/kubevirt/terraform-provider-kubevirt => github.com/nirarg/terraform-provider-kubevirt v0.0.0-20201222125919-101cee051ed3 + github.com/metal3-io/baremetal-operator => github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe + github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d + github.com/openshift/api => github.com/openshift/api v0.0.0-20210331193751-3acddb19d360 + github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 + github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20210802140536-4d8d83dcd464 + github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.8.2-0.20210811141203-dcb07e8eac34 + github.com/terraform-providers/terraform-provider-aws => github.com/openshift/terraform-provider-aws v1.60.1-0.20200630224953-76d1fb4e5699 + github.com/terraform-providers/terraform-provider-azurerm => github.com/openshift/terraform-provider-azurerm v1.40.1-0.20200707062554-97ea089cc12a + github.com/terraform-providers/terraform-provider-ignition/v2 => github.com/community-terraform-providers/terraform-provider-ignition/v2 v2.1.0 + golang.org/x/text => golang.org/x/text v0.3.5 + k8s.io/api => k8s.io/api v0.21.3 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.2 + k8s.io/apimachinery => k8s.io/apimachinery v0.21.3 + k8s.io/client-go => k8s.io/client-go v0.21.0 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 + kubevirt.io/client-go => kubevirt.io/client-go v0.29.0 + open-cluster-management.io/addon-framework => open-cluster-management.io/addon-framework v0.0.0-20210909134218-e6e993872bb1 + // HiveConfig import dependencies + sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837 + sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b + sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35b2e38e + sigs.k8s.io/kube-storage-version-migrator => github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20210302135122-481bd04dbc78 +) + +// needed because otherwise the installer fetches a library-go version that requires bitbucket.com/ww/goautoneg, which is dead +// The tagged version fetches github.com/munnerz/goautoneg instead +replace github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20200918101923-1e4c94603efe diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..d4d2c0b1d --- /dev/null +++ b/go.sum @@ -0,0 +1,3843 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= +bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= +cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= +cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.64.0/go.mod h1:xfORb36jGvE+6EexW71nMEtL025s3x6xvuYUKM4JLv4= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0 h1:8ZtzmY4a2JIO2sljMbpqkDYxA8aJQveYr3AMa+X40oc= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.6.0/go.mod h1:hyFDG0qSGdHNz8Q6nDN8rYIkld0q/+5uBZaelxiDLfE= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.1.0/go.mod 
h1:B6ByKcIdYmhoyDzmOnQxyOhN6r05qnewYIxxG6L0/b4= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= +cloud.google.com/go/bigtable v1.5.0/go.mod h1:713PsD2nkJwTioSe6vF/sFCAcjhINJ62cEtKCr8u+F8= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= +cloud.google.com/go/storage v1.4.0/go.mod h1:ZusYJWlOshgSBGbt6K3GnB3MT3H1xs2id9+TCl4fDBA= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.7.0/go.mod h1:jGMIBwF+L/tL6WN/W5InNgYYu4HP0DvGB6rQ1mufWfs= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.11.0/go.mod h1:/PAbprKS+5msVYogBmczjWalDXnQ9mr64yEq9YnyPeo= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AlecAivazis/survey/v2 v2.0.5/go.mod h1:WYBhg6f0y/fNYUuesWQc0PKbJcEliGcYHB9sNT3Bg74= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= 
+github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v40.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v42.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v43.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v45.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v55.8.0+incompatible h1:EuccMPzxu67cIE95/mrtwQivLv7ETmURi5IUgLNVug8= +github.com/Azure/azure-sdk-for-go v55.8.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v11.7.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.6/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.19 h1:7/IqD2fEYVha1EPeaiytVKhzmPV223pfkRIQUGOK2IE= +github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= 
+github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.14 h1:G8hexQdV5D4khOXrWG2YuLCFKhWYmWD8bHYaXN5ophk= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.1/go.mod h1:5TgH20II424SXIV9YDBsO4rBCKsh39Vbx9DvhJZZ8rU= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.2.0/go.mod h1:WWTbGPvkAg3I4ms2j2s+Zr5xCGwGqTQh+6M2ZqOczkE= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.1/go.mod h1:JfDgiIO1/RPu6z42AdQTyjOoCM2MFhLqSBDvMEkDgcg= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod 
h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20191115210519-2b2be6cc8ed4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/ChrisTrenkamp/goxpath v0.0.0-20190607011252-c5096ec8773d/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= +github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/IBM/controller-filtered-cache v0.3.3 h1:B8INm/FDR5akkOBADNzXFdVFLf+8gXtVatcEP8yQvTM= +github.com/IBM/controller-filtered-cache v0.3.3/go.mod h1:gEDzSQxUwcdScwsw59MTwchTjh6vzLWaSPffIkr85U4= +github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= +github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= 
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU= +github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= +github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= +github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= +github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.13/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod 
h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/Netflix/go-expect v0.0.0-20190729225929-0e00d9168667/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= +github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= 
+github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= +github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd/go.mod h1:idhzw68Q7v4j+rQ2AGyq3OlZW2Jij9mdmGA4/Sk6J0E= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190412020505-60e2075261b6/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190620160927-9418d7b0cd0f/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xpath v1.1.2/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aokoli/goutils v1.0.1/go.mod 
h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= +github.com/apparentlymart/go-userdirs v0.0.0-20200915174352-b0c018a67c13/go.mod h1:7kfpUbyCdGJ9fDRCp3fopPQi5+cKNHgTE4ZuNrO71Cw= +github.com/apparentlymart/go-versions v0.0.2-0.20180815153302-64b99f7cb171/go.mod h1:JXY95WvQrPJQtudvNARshgWajS7jNNlM90altXIPNyI= +github.com/apparentlymart/go-versions v1.0.0/go.mod h1:YF5j7IQtrOAOnsGkniupEA5bfCjzd7i14yu0shZavyM= +github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U= +github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-proxyproto v0.0.0-20190211145416-68259f75880e/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod 
h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/awalterschulze/gographviz v0.0.0-20190522210029-fa59802746ab/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.15.66/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.47/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.28.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.30.24/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.32.3/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.21/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.37.8/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.41/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.40.10 h1:h+xUINuuH/9CwxE7O8mAuW7Aj9E5agfE9jQ3DrJsnA8= +github.com/aws/aws-sdk-go v1.40.10/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= 
+github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= +github.com/bflad/tfproviderdocs v0.6.0/go.mod h1:W6wVZPtBa6V5bpjaK1eJAoVCL/7B4Amfrld0dro+fHU= +github.com/bflad/tfproviderdocs v0.7.0/go.mod h1:W6wVZPtBa6V5bpjaK1eJAoVCL/7B4Amfrld0dro+fHU= +github.com/bflad/tfproviderlint v0.14.0/go.mod h1:1Jtjs6DPKoyqPrbPyMiy33h0ViO2h831uzoOuikCA60= +github.com/bflad/tfproviderlint v0.18.0/go.mod h1:0fdh7JywihC58Io8AZ+gpcmQtJggse0MCOXF2tMmnAQ= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bmatcuk/doublestar v1.2.1/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bmatcuk/doublestar v1.3.2/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/wsl/v3 
v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= +github.com/brancz/gojsontoyaml v0.0.0-20191212081931-bf2969bbd742/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= +github.com/brancz/kube-rbac-proxy v0.5.0/go.mod h1:cL2VjiIFGS90Cjh5ZZ8+It6tMcBt8rwvuw2J6Mamnl0= +github.com/brancz/kube-rbac-proxy v0.8.0/go.mod h1:j7cg7LCsVWhOLBwJWxPxn5q/tZpFp6KH1KB6Jq4ixOo= +github.com/brancz/locutus v0.0.0-20210511124350-7a84f4d1bcb3 h1:N7vTNNytk6OFY5WoPJ+cSxxlRbNpCUWdyPW8nDHp0Sw= +github.com/brancz/locutus v0.0.0-20210511124350-7a84f4d1bcb3/go.mod h1:n+EREm6Tinr9eHmIls4DzojRkkA4IrBe6xRpO4HEw0I= +github.com/briankassouf/jose v0.9.2-0.20180619214549-d2569464773f/go.mod h1:HQhVmdUf7dBNwIIdBTivnCDxcf6IZY3/zrb+uKSJz6Y= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= +github.com/btubbs/datetime v0.1.0/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= +github.com/btubbs/datetime v0.1.1/go.mod h1:n2BZ/2ltnRzNiz27aE3wUb2onNttQdC+WFxAoks5jJM= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/c4milo/gotoolkit v0.0.0-20170704181456-e37eeabad07e/go.mod h1:txokOny9wavBtq2PWuHmj1P+eFwpCsj+gQeNNANChfU= +github.com/c4milo/gotoolkit v0.0.0-20190525173301-67483a18c17a/go.mod h1:txokOny9wavBtq2PWuHmj1P+eFwpCsj+gQeNNANChfU= +github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= +github.com/cameront/go-jsonpatch v0.0.0-20180223123257-a8710867776e/go.mod h1:kdPJxKAfR3ZdD+MWYorN1oTdV9+qwJy9jO/0meJmcxU= +github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= 
+github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= +github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= +github.com/chromedp/cdproto v0.0.0-20200116234248-4da64dd111ac/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= +github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= +github.com/chromedp/chromedp v0.5.3/go.mod h1:YLdPtndaHQ4rCpSpBG+IPpy9JvX0VD+7aaLxYgYj28w= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod 
h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/cfssl v1.6.0 h1:If7CCGnSGsmZ3dZS2ib4sSiu3S39IQPIwrS2dJ6cFXQ= +github.com/cloudflare/cfssl v1.6.0/go.mod h1:9tj9734Opm88JuxpLJPoY6zRGc7XfXM8A+x6nh5/Cqg= +github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/cockroachdb/datadriven v0.0.0-20190531201743-edce55837238/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/community-terraform-providers/terraform-provider-ignition/v2 v2.1.0/go.mod h1:0reAZvVLhna+mtZ5RcHH4W8iGwM7ZEAK3Y8TCgn9+ZQ= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod 
h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA= +github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod 
h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod 
h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/image v3.0.2+incompatible/go.mod h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= +github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= +github.com/coreos/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= +github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd 
v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/ignition v0.33.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= +github.com/coreos/ignition v0.34.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= +github.com/coreos/ignition/v2 v2.3.0/go.mod h1:85dmM/CERMZXNrJsXqtNLIxR/dn8G9qlL1CmEjCugp0= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/prometheus-operator v0.35.0/go.mod h1:XHYZUStZWcwd1yk/1DjZv/fywqKIyAJ6pSwvIr+v9BQ= +github.com/coreos/prometheus-operator v0.38.0/go.mod h1:xZC7/TgeC0/mBaJk+1H9dbHaiEvLYHgX6Mi1h40UPh8= +github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc/go.mod h1:erio69w1R/aC14D5nfvAXSlE8FT8jt2Hnavc50Dp33A= +github.com/coreos/vcontext v0.0.0-20190529201340-22b159166068/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE= +github.com/coreos/vcontext v0.0.0-20200225161404-ee043618d38d/go.mod h1:z4pMVvaUrxs98RROlIYdAQCKhEicjnTirOaVyDRH5h8= +github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= +github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= +github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0= +github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU= +github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg= +github.com/cortexproject/cortex v1.6.1-0.20210108144208-6c2dab103f20/go.mod h1:fOsaeeFSyWrjd9nFJO8KVUpsikcxnYsjEzQyjURBoQk= +github.com/cortexproject/cortex v1.6.1-0.20210215155036-dfededd9f331/go.mod h1:8bRHNDawVx8te5lIqJ+/AcNTyfosYNC34Qah7+jX/8c= +github.com/cortexproject/cortex v1.7.1-0.20210224085859-66d6fb5b0d42/go.mod h1:u2dxcHInYbe45wxhLoWVdlFJyDhXewsMcxtnbq/QbH4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty 
v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= +github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= +github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= +github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= +github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= +github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/davegardnerisme/deephash v0.0.0-20210406090112-6d072427d830/go.mod h1:ToGe2SdaElKXzEmYLttAgFHy0exxh0wyq9zG7ZjjjYM= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= +github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0= +github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= 
+github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= +github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/digitalocean/godo v1.64.2 h1:lJEB2TVIkJydFWJMPtdYOPa2Xwib+smZqq/oUZF8/iA= +github.com/digitalocean/godo v1.64.2/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dmacvicar/terraform-provider-libvirt v0.6.2/go.mod h1:rUzijwUJHukJWZKi6PJXo9aJqGLXeUX6NDEy6O0UATg= +github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.7+incompatible 
h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustinkirkland/golang-petname v0.0.0-20170105215008-242afa0b4f8a/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= +github.com/dustinkirkland/golang-petname v0.0.0-20170921220637-d3c2ba80e75e/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= +github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0/go.mod h1:V+Qd57rJe8gd4eiGzZyg4h54VLHmYVVw54iMnlAMrF8= +github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ= +github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go 
v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/efficientgo/tools/core v0.0.0-20210201224146-3d78f4d30648 h1:zY9fs6qlXtS/YlrijZ+7vTqduJRybPYwJ8Mjo4zWrS8= +github.com/efficientgo/tools/core v0.0.0-20210201224146-3d78f4d30648/go.mod h1:cFZoHUhKg31xkPnPjhPKFtevnx0Xcg67ptBRxbpaxtk= +github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= +github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= +github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.10.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.14.2+incompatible h1:uyx8VgUCryEkh7qbr8rEtrA0rGDEJ73F5lOJdB5m3V8= +github.com/emicklei/go-restful v2.14.2+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA= +github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
+github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM= +github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= 
+github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= +github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= +github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= +github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= +github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= +github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= 
+github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g= +github.com/go-critic/go-critic v0.5.2/go.mod h1:cc0+HvdE3lFpqLecgqMaJcvWWH77sLdBp+wLGPM1Yyo= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= +github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-log/log v0.0.0-20181211034820-a514cf01a3eb/go.mod h1:4mBwpdRMFLiuXZDCwU2lKQFsoSCo72j3HqBK9d81N2M= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= +github.com/go-logr/zapr v0.3.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= +github.com/go-logr/zapr v0.4.0 
h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= +github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.0 h1:Sxpo9PjEHDzhs3FbnGNonvDgWcMW2U7wGTcDDSFSceM= +github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/runtime v0.19.28 h1:9lYu6axek8LJrVkMVViVirRcpoaCxXX7+sSvmizGVnA= +github.com/go-openapi/runtime v0.19.28/go.mod 
h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= +github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.1 h1:1VgxvehFne1mbChGeCmZ5pc0LxUf6yaACVSIYAR91Xc= +github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72/go.mod h1:CJP1ZIHwhosNYwIdaHPZK9vHsM3+roNBaZ7U9Of1DXc= +github.com/go-redis/redis/v8 v8.2.3/go.mod h1:ysgGY09J/QeDYbu3HikWEIPCwaeOkuNoTgKayTEaEOw= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= +github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod 
h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= +github.com/gobuffalo/flect 
v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/flect v0.2.1/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A= +github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= +github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= +github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod 
h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/gocql/gocql v0.0.0-20200121121104-95d072f1b5bb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= +github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= 
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= +github.com/golangci/golangci-lint v1.19.1/go.mod h1:2CEc4Fxx3vxDv7g8DyXkHCBF73AOzAymcJAprs2vCps= +github.com/golangci/golangci-lint v1.26.0/go.mod h1:tefbO6RcigFzvTnDC+Y51kntVGgkuCAVsC+mnfbPruc= +github.com/golangci/golangci-lint v1.31.0/go.mod h1:aMQuNCA+NDU5+4jLL5pEuFHoue0IznKE2+/GsFvvs8A= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod 
h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= +github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-jsonnet v0.17.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= +github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a/go.mod h1:o93WzqysX0jP/10Y13hfL6aq9RoUvGaVdkrH5awMksE= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200615235658-03e1cf38a040/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210726183535-c50bf4fe5303/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2/go.mod h1:DavVbd41y+b7ukKDmlnPR4nGYmkWXR6vHUkjQNiHPBs= +github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gookit/color v1.1.7/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= +github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= +github.com/gookit/color v1.2.9/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= +github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/gophercloud v0.0.0-20190212181753-892256c46858/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.1.0/go.mod 
h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= +github.com/gophercloud/gophercloud v0.6.1-0.20191025185032-6ad562af8c1f/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= +github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM= +github.com/gophercloud/gophercloud v0.10.1-0.20200424014253-c3bfe50899e5/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gophercloud/gophercloud v0.12.1-0.20200821143728-362eb785d617/go.mod h1:w2NJEd88d4igNL1KUHzBsKMvS/ByJTzgltTGWKT7AC8= +github.com/gophercloud/gophercloud v0.12.1-0.20200827191144-bb4781e9de45/go.mod h1:w2NJEd88d4igNL1KUHzBsKMvS/ByJTzgltTGWKT7AC8= +github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= +github.com/gophercloud/gophercloud v0.19.0 h1:zzaIh8W2K5M4AkJhPV1z6O4Sp0FOObzXm61NUmFz3Kw= +github.com/gophercloud/gophercloud v0.19.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/utils v0.0.0-20190124231947-9c3b9f2457ef/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= +github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= +github.com/gophercloud/utils v0.0.0-20190313033024-0bcc8e728cb5/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= +github.com/gophercloud/utils v0.0.0-20200423144003-7c72efc7435d/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gophercloud/utils v0.0.0-20200918191848-da0e919a012a/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gophercloud/utils v0.0.0-20201101202656-8677e053dcf1/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gophercloud/utils v0.0.0-20201212031956-9dc30e126fea/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gophercloud/utils v0.0.0-20210113034859-6f548432055a/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= +github.com/gopherjs/gopherjs v0.0.0-20180628210949-0892b62f0d9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v0.0.0-20191024121256-f395758b854c/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.6.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= 
+github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v0.0.0-20160422134519-667fe4e3466a/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v0.0.0-20160922145804-ca9ada445741/go.mod h1:+WVp8kdw6VhyKExm03PAMRn2ZxnPtm58pV0dBVPdhHE= +github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.2.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod 
h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= +github.com/h2non/filetype v1.0.12/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ= +github.com/hashicorp/aws-sdk-go-base v0.6.0/go.mod h1:2fRjWDv3jJBeN6mVWFHV6hFTNeFBx2gpDLQaZNxUVAY= +github.com/hashicorp/consul v1.10.6 h1:vc2vtdjjWO/o2d68bwlY8yYM/GftMbSbX1+qD4k2nRo= +github.com/hashicorp/consul v1.10.6/go.mod h1:CDc6+iVEHaEYOUlL7vwaLv31j5O1W7Cq9DpYxh4dVEw= +github.com/hashicorp/consul-template v0.22.0/go.mod h1:lHrykBIcPobCuEcIMLJryKxDyk2lUMnQWmffOEONH0k= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= +github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.10.0 h1:r4nkRKOem378GREHlWdLDROSlDkQFf1VeLX+Ee02EdI= +github.com/hashicorp/consul/api v1.10.0/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-azure-helpers v0.4.1/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA= +github.com/hashicorp/go-azure-helpers v0.10.0/go.mod h1:YuAtHxm2v74s+IjQwUG88dHBJPd5jL+cXr5BGVzSKhE= +github.com/hashicorp/go-azure-helpers v0.12.0/go.mod h1:Zc3v4DNeX6PDdy7NljlYpnrdac1++qNW0I4U+ofGwpg= +github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= 
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-connlimit v0.3.0/go.mod h1:OUj9FGL1tPIhl/2RCfzYHrIiWj+VVPGNyVPnUX8AqS0= +github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= +github.com/hashicorp/go-discover v0.0.0-20200501174627-ad1e96bde088/go.mod h1:vZu6Opqf49xX5lsFAu7iFNewkcVF1sn/wyapZh5ytlg= +github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4= +github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw= +github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-getter v1.4.2-0.20200106182914-9813cbd4eb02/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.10.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.13.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0 h1:8exGP7ego3OmkfksihtSouGMZ+hQrhxx+FVELeXpVPE= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-memdb v1.0.2/go.mod h1:I6dKdmYhZqU0RJSheVEWgTNWdVQH5QvTgIUQ0t/t32M= +github.com/hashicorp/go-memdb v1.3.1/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin 
v1.0.1-0.20190610192547-a1bc61569a26/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.2.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.2.2/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= +github.com/hashicorp/go-raftchunking v0.6.1/go.mod h1:cGlg3JtDy7qy6c/3Bu660Mic1JF+7lWqIwCFSb08fX0= +github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU= +github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY= +github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-slug v0.4.1/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= +github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-tfe v0.3.27/go.mod h1:DVPSW2ogH+M9W1/i50ASgMht8cHP7NxxK0nrY9aFikQ= +github.com/hashicorp/go-tfe v0.8.1/go.mod h1:XAV72S4O1iP8BDaqiaPLmL2B4EE6almocnOn8E8stHc= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= 
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= +github.com/hashicorp/hcl/v2 v2.1.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90= +github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= +github.com/hashicorp/hcl/v2 v2.5.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= +github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= +github.com/hashicorp/hcl2 v0.0.0-20190821123243-0c888d1241f6/go.mod h1:Cxv+IJLuBiEhQ7pBYGEuORa0nr4U994pE8mYLuFd7v0= +github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= +github.com/hashicorp/hil v0.0.0-20190212132231-97b3a9cdfa93/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= +github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.3/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= +github.com/hashicorp/nomad/api v0.0.0-20190412184103-1c38ced33adf/go.mod h1:BDngVi1f4UA6aJq9WYTgxhfWSE1+42xshvstLU2fRGk= +github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= +github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod 
h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft v1.3.2/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft-autopilot v0.1.5/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/raft-snapshot v1.0.2-0.20190827162939-8117efcc5aab/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic= +github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= +github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= +github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= +github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/terraform v0.12.21/go.mod h1:eJcloDEx5ywM4a1tetIuVrlqklM0bUVRYJBYAh4CYzA= +github.com/hashicorp/terraform v0.13.4/go.mod h1:1H1qcnppNc/bBGc7poOfnmmBeQMlF0stEN3haY3emCU= +github.com/hashicorp/terraform-config-inspect v0.0.0-20190821133035-82a99dc22ef4/go.mod h1:JDmizlhaP5P0rYTTZB0reDMefAiJyfWPEtugV4in1oI= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191115094559-17f92b0546e8/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191121111010-e9629612a215/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= +github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= +github.com/hashicorp/terraform-config-inspect v0.0.0-20200806211835-c481b8bfa41e/go.mod h1:Z0Nnk4+3Cy89smEbrq+sl1bxc9198gIP4I7wcQF6Kqs= +github.com/hashicorp/terraform-exec v0.1.1/go.mod h1:yKWvMPtkTaHpeAmllw+1qdHZ7E5u+pAZ+x8e2jQF6gM= +github.com/hashicorp/terraform-exec v0.3.0/go.mod h1:yKWvMPtkTaHpeAmllw+1qdHZ7E5u+pAZ+x8e2jQF6gM= +github.com/hashicorp/terraform-exec v0.10.0/go.mod h1:tOT8j1J8rP05bZBGWXfMyU3HkLi1LWyqL3Bzsc3CJjo= +github.com/hashicorp/terraform-json v0.4.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/hashicorp/terraform-json v0.6.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/hashicorp/terraform-plugin-sdk v1.0.0/go.mod h1:NuwtLpEpPsFaKJPJNGtMcn9vlhe6Ofe+Y6NqXhJgV2M= +github.com/hashicorp/terraform-plugin-sdk v1.4.0/go.mod h1:H5QLx/uhwfxBZ59Bc5SqT19M4i+fYt7LZjHTpbLZiAg= +github.com/hashicorp/terraform-plugin-sdk v1.6.0/go.mod h1:H5QLx/uhwfxBZ59Bc5SqT19M4i+fYt7LZjHTpbLZiAg= +github.com/hashicorp/terraform-plugin-sdk v1.7.0/go.mod h1:OjgQmey5VxnPej/buEhe+YqKm0KNvV3QqU4hkqHqPCY= +github.com/hashicorp/terraform-plugin-sdk v1.8.0/go.mod h1:OjgQmey5VxnPej/buEhe+YqKm0KNvV3QqU4hkqHqPCY= +github.com/hashicorp/terraform-plugin-sdk v1.9.0/go.mod h1:C/AXwmDHqbc3h6URiHpIsVKrwV4PS0Sh0+VTaeEkShw= +github.com/hashicorp/terraform-plugin-sdk v1.11.0/go.mod h1:HiWIPD/T9HixIhQUwaSoDQxo4BLFdmiBi/Qz5gjB8Q0= 
+github.com/hashicorp/terraform-plugin-sdk v1.12.0/go.mod h1:HiWIPD/T9HixIhQUwaSoDQxo4BLFdmiBi/Qz5gjB8Q0= +github.com/hashicorp/terraform-plugin-sdk v1.13.1/go.mod h1:HiWIPD/T9HixIhQUwaSoDQxo4BLFdmiBi/Qz5gjB8Q0= +github.com/hashicorp/terraform-plugin-sdk v1.14.0/go.mod h1:t62Xy+m7Zjq5tA2vrs8Wuo/TQ0sc9Mx9MjXL3+7MHBQ= +github.com/hashicorp/terraform-plugin-sdk v1.15.0/go.mod h1:PuFTln8urDmRM6mV0II6apOTsyG/iHkxp+5W11eJE58= +github.com/hashicorp/terraform-plugin-sdk v1.16.0/go.mod h1:5sVxrwW6/xzFhZyql+Q9zXCUEJaGWcBIxBbZFLpVXOI= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.0.0/go.mod h1:xOf85UtHJ0/9/EF3eKgZFlJ6feN8sDtjQRWRHhimCUw= +github.com/hashicorp/terraform-plugin-test v1.2.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= +github.com/hashicorp/terraform-plugin-test v1.3.0/go.mod h1:QIJHYz8j+xJtdtLrFTlzQVC0ocr3rf/OjIpgZLK56Hs= +github.com/hashicorp/terraform-plugin-test v1.4.3/go.mod h1:UA7z/02pgqsRLut4DJIPm0Hjnj27uOvhi19c8kTqIfM= +github.com/hashicorp/terraform-plugin-test/v2 v2.0.0-20200724200815-faa9931ac59e/go.mod h1:C6VALgUlvaif+PnHyRGKWPTdQkMJK4NQ20VJolxZLI0= +github.com/hashicorp/terraform-plugin-test/v2 v2.1.2/go.mod h1:jerO5mrd+jVNALy8aiq+VZOg/CR8T2T1QR3jd6JKGOI= +github.com/hashicorp/terraform-provider-google v1.20.1-0.20200824213103-e32ba28ec398/go.mod h1:qc51ZGlewTr4W9ho2SxvlPETQmDyeN/kC27b1mTQuQw= +github.com/hashicorp/terraform-provider-kubernetes v1.13.3/go.mod h1:XSLp2+OcV4wZQ6VhgHswDwz4LJwiwO1/vbJYKt+gqWg= +github.com/hashicorp/terraform-svchost v0.0.0-20191011084731-65d371908596/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= +github.com/hashicorp/terraform-svchost v0.0.0-20191119180714-d2e4933b9136/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= +github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= +github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= +github.com/hashicorp/vault v1.3.0/go.mod h1:b06flBzBl5H4rWbHg/Hcbvos1vlklo50J05XqIS79Co= +github.com/hashicorp/vault-plugin-auth-alicloud v0.5.2-0.20190814210027-93970f08f2ec/go.mod h1:TYFfVFgKF9x92T7uXouI9rLPkNnyXo/KkNcj5t+mjdM= +github.com/hashicorp/vault-plugin-auth-azure v0.5.2-0.20190814210035-08e00d801115/go.mod h1:sRhTnkcbjJgPeES0ddCTq8S2waSakyMiWLUwO5J/Wjk= +github.com/hashicorp/vault-plugin-auth-centrify v0.5.2-0.20190814210042-090ec2ed93ce/go.mod h1:WstOCHERNbk2dblnY5MV9Qeh/hzTSQpVs5xPuyAzlBo= +github.com/hashicorp/vault-plugin-auth-cf v0.0.0-20190821162840-1c2205826fee/go.mod h1:zOag32+pm1R4FFNhXMLP506Oesjoai3gHEEpxqUaTr0= +github.com/hashicorp/vault-plugin-auth-gcp v0.5.1/go.mod h1:eLj92eX8MPI4vY1jaazVLF2sVbSAJ3LRHLRhF/pUmlI= +github.com/hashicorp/vault-plugin-auth-gcp v0.5.2-0.20190930204802-acfd134850c2/go.mod h1:j0hMnnTD44zXGQhLM1jarYDaTmSp6OPiOzgFQ6mNgzc= +github.com/hashicorp/vault-plugin-auth-jwt v0.5.2-0.20191010173058-65cf93bad3f2/go.mod h1:Ti2NPndKhSGpSL6gWg11n7TkmuI7318BIPeojayIVRU= +github.com/hashicorp/vault-plugin-auth-kubernetes v0.5.2-0.20190925162726-2e5b0b8184e6/go.mod h1:qkrONCr71ckSCTItJQ1j9uet/faieZJ5c7+GZugTm7s= +github.com/hashicorp/vault-plugin-auth-oci v0.0.0-20190904175623-97c0c0187c5c/go.mod h1:YAl51RsYRihPbSdnug1NsvutzbRVfrZ12FjEIvSiOTs= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.0.0-20190814210117-e079e01fbb93/go.mod h1:N9XpfMXjeLHBgUd8iy4avOC4mCSqUC7B/R8AtCYhcfE= +github.com/hashicorp/vault-plugin-secrets-ad v0.6.1-0.20191108162300-8f4121d78b9c/go.mod h1:Nmxv/d6tFm0lr8gbFIF+Hj+0xYcBiyfEwX2FscpbhbQ= 
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.5.2-0.20190814210129-4d18bec92f56/go.mod h1:hJ42zFd3bHyE8O2liBUG+VPY0JxdMrj51TOwVGViUIU= +github.com/hashicorp/vault-plugin-secrets-azure v0.5.2/go.mod h1:SBc53adxMmf+o8zqRbqYvq+nuSrz8OHYmgmPfxVMJEo= +github.com/hashicorp/vault-plugin-secrets-gcp v0.5.3-0.20191112195538-3c798536d157/go.mod h1:Sc+ba3kscakE5a/pi8JJhWvXWok3cpt1P77DApmUuDc= +github.com/hashicorp/vault-plugin-secrets-gcpkms v0.5.2-0.20190814210149-315cdbf5de6e/go.mod h1:5prAHuCcBiyv+xfGBviTVYeDQUhmQYN7WrxC2gMRWeQ= +github.com/hashicorp/vault-plugin-secrets-kv v0.5.2-0.20191017213228-e8cf7060a4d0/go.mod h1:H0VKQagsJoK9o2qpULMgbspuWVnFe3G4S/K7f0Dr8qY= +github.com/hashicorp/vault/api v1.0.1/go.mod h1:AV/+M5VPDpB90arloVX0rVDUIHkONiwz5Uza9HRtpUE= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/api v1.0.5-0.20190730042357-746c0b111519/go.mod h1:i9PKqwFko/s/aihU1uuHGh/FaQS+Xcgvd9dvnfAvQb0= +github.com/hashicorp/vault/api v1.0.5-0.20190814205728-e9c5cd8aca98/go.mod h1:t4IAg1Is4bLUtTq8cGgeUh0I8oDRBXPk2bM1Jvg/nWA= +github.com/hashicorp/vault/api v1.0.5-0.20191017194845-99f7184d3326/go.mod h1:8vZ3PoohxqemJEi//WSVsaMKwwXyyfP8zt9KHgBVhKU= +github.com/hashicorp/vault/api v1.0.5-0.20191108162021-73e607235264/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= +github.com/hashicorp/vault/api v1.0.5-0.20191108163347-bdd38fca2cff/go.mod h1:Uf8LaHyrYsgVgHzO2tMZKhqRGlL3UJ6XaSwW2EA1Iqo= +github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= +github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/sdk v0.1.14-0.20190730042320-0dc007d98cc8/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/sdk v0.1.14-0.20190814205504-1cad00d1133b/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/sdk v0.1.14-0.20190919081434-645ac174deeb/go.mod h1:wcxXjskBz2VmyZm4MKNoLCOqsQEKkyBAUIP2YBTJL1g= +github.com/hashicorp/vault/sdk v0.1.14-0.20191017211055-9bd3a27a36c4/go.mod h1:tXLVOeyErHGojiim3hA6DUSxcRisohZbpATIpln8JsE= +github.com/hashicorp/vault/sdk v0.1.14-0.20191108161836-82f2b5571044/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= +github.com/hashicorp/vault/sdk v0.1.14-0.20191112033314-390e96e22eb2/go.mod h1:PcekaFGiPJyHnFy+NZhP6ll650zEw51Ag7g/YEa+EOU= +github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= +github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20210826001029-26ff87cf9493/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/helm/helm-2to3 v0.5.1/go.mod h1:AXFpQX2cSQpss+47ROPEeu7Sm4+CRJ1jKWCEQdHP3/c= +github.com/heptio/velero v1.0.0/go.mod h1:Q8aj4N9pmvDNp2hWmqWJ1Z1ybUjEI+iQOP3C9hx2njQ= 
+github.com/hetznercloud/hcloud-go v1.28.0 h1:T2a0CVGETf7BoWIdZ/TACqmTZAa/ROutcfdUHYiPAQ4= +github.com/hetznercloud/hcloud-go v1.28.0/go.mod h1:2C5uMtBiMoFr3m7lBFPf7wXTdh33CevmZpQIIDPGYJI= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/hooklift/assert v0.0.0-20170704181755-9d1defd6d214/go.mod h1:kj6hFWqfwSjFjLnYW5PK1DoxZ4O0uapwHRmd9jhln4E= +github.com/hooklift/iso9660 v1.0.0/go.mod h1:sOC47ru8lB0DlU0EZ7BJ0KCP5rDqOvx0c/5K5ADm8H0= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.0.0-20171009183408-7fe0c75c13ab/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I= +github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= +github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc= +github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb1-client 
v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4= +github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00= +github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jen20/awspolicyequivalence v1.1.0/go.mod h1:PV1fS2xyHhCLp83vbgSMFr2drM4GzG61wkz+k4pOG3E= +github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= +github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a/go.mod h1:yL958EeXv8Ylng6IfnvG4oflryUi3vgA3xPs9hmII1s= +github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath 
v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 h1:lrdPtrORjGv1HbbEvKWDUAy97mPpFm4B8hp77tcCUJY= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/johannesboyne/gofakes3 v0.0.0-20200218152459-de0855a40bc1/go.mod h1:fNiSoOiEI5KlkWXn26OwKnNe58ilTIkpBlgOrt7Olu8= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/joyent/triton-go v0.0.0-20190112182421-51ffac552869/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jsonnet-bundler/jsonnet-bundler v0.1.0/go.mod 
h1:YKsSFc9VFhhLITkJS3X2PrRqWG9u2Jq99udTdDjQLfM= +github.com/jsonnet-bundler/jsonnet-bundler v0.2.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= +github.com/jsonnet-bundler/jsonnet-bundler v0.3.1/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v0.0.0-20191119172530-79f836b90111/go.mod h1:MP2HbArq3QT+oVp8pmtHNZnSnkhdkHtDnc7h6nJXmBU= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/katbyte/terrafmt v0.2.1-0.20200303174203-e6a3e82cb21b/go.mod h1:WRq5tDmK04tcYbEr400zAUWtOK0jix54e8YeHP3IoQg= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/keybase/go-crypto v0.0.0-20190828182435-a05457805304/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisom/goutils v1.1.0/go.mod h1:+UBTfd78habUYWFbNWTJNG+jNG/i/lGURakr4A/yNRw= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.5/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFtnBXsd1vPOZaLsESovai349994nHx3e0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= +github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= +github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.7/go.mod h1:h1rDl2Kdj97+Kwh4gdz3ujE7XHmH51Q0lUiZ1z4NLj8= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/builder 
v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/libvirt/libvirt-go v4.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= +github.com/libvirt/libvirt-go v5.0.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= +github.com/libvirt/libvirt-go v5.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= +github.com/libvirt/libvirt-go-xml v4.10.0+incompatible/go.mod h1:oBlgD3xOA01ihiK5stbhFzvieyW+jVS6kbbsMVF623A= +github.com/libvirt/libvirt-go-xml v5.0.0+incompatible/go.mod h1:oBlgD3xOA01ihiK5stbhFzvieyW+jVS6kbbsMVF623A= +github.com/libvirt/libvirt-go-xml v5.10.0+incompatible/go.mod h1:oBlgD3xOA01ihiK5stbhFzvieyW+jVS6kbbsMVF623A= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/likexian/gokit v0.0.0-20190309162924-0a377eecf7aa/go.mod h1:QdfYv6y6qPA9pbBA2qXtoT8BMKha6UyNbxWGWl/9Jfk= +github.com/likexian/gokit v0.0.0-20190418170008-ace88ad0983b/go.mod h1:KKqSnk/VVSW8kEyO2vVCXoanzEutKdlBAPohmGXkxCk= +github.com/likexian/gokit v0.0.0-20190501133040-e77ea8b19cdc/go.mod h1:3kvONayqCaj+UgrRZGpgfXzHdMYCAO0KAt4/8n0L57Y= +github.com/likexian/gokit v0.20.15/go.mod h1:kn+nTv3tqh6yhor9BC4Lfiu58SmH8NmQ2PmEl+uM6nU= +github.com/likexian/simplejson-go v0.0.0-20190409170913-40473a74d76d/go.mod h1:Typ1BfnATYtZ/+/shXfFYLrovhFyuKvzwrdOnIDHlmg= +github.com/likexian/simplejson-go v0.0.0-20190419151922-c1f9f0b4f084/go.mod h1:U4O1vIJvIKwbMZKUJ62lppfdvkCdVd2nfMimHK81eec= +github.com/likexian/simplejson-go v0.0.0-20190502021454-d8787b4bfa0b/go.mod h1:3BWwtmKP9cXWwYCr5bkoVDEfLywacOv0s06OBEDpyt8= +github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= +github.com/linode/linodego v0.31.0 h1:99+t6GtTaIsL+ncz5BpCbkh8Y90rC7qW1Not8MFe0Gw= +github.com/linode/linodego v0.31.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= +github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= +github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84= +github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= 
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.2/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190620125010-da37f6c1e481/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maorfr/helm-plugin-utils v0.0.0-20200216074820-36d2fcf6ae86/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= +github.com/martinlindhe/base36 v1.0.0/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= +github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY= +github.com/masterzen/winrm v0.0.0-20190308153735-1d17eaf15943/go.mod h1:bsMsaiOA3CXjbJxW0a94G4PfPDj9zUmH5JoFuJ9P4o0= +github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88/go.mod h1:a2HXwefeat3evJHxFXSayvRHpYEPJYtErl4uIzfaUqY= +github.com/matoous/godox v0.0.0-20190910121045-032ad8106c86/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matoous/godox v0.0.0-20200801072554-4fb83dc2941e/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8= 
+github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty 
v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= +github.com/metal3-io/baremetal-operator/apis v0.0.0-20211013091910-8feb172f0068/go.mod h1:/pror7LknTgduckyCOm/EhnjFKIZy3mXQHr2GdaQ3kQ= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= +github.com/michaelklishin/rabbit-hole v0.0.0-20191008194146-93d9988f0cd5/go.mod h1:+pmbihVqjC3GPdfWv1V2TnRSuVvwrWLKfEP/MZVB/Wc= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.38/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= +github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= +github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= +github.com/minio/highwayhash v1.0.1/go.mod 
h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/minio-go/v6 v6.0.56/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX37i2AX0jfI= +github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns= +github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= +github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
+github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.0.0/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1-0.20210112042008-8ebf2d61a8b4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/packer v1.3.2/go.mod h1:3TnGTkplC/koV8K6bCfCN1NB34Tye7lmUzo55/X5wqw= +github.com/mitchellh/panicwrap v1.0.0/go.mod h1:pKvZHwWrZowLUzftuFq7coarnxbBXU4aQh3N0BJOeeA= +github.com/mitchellh/pointerstructure v0.0.0-20190430161007-f252a8fd71c8/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= +github.com/mitchellh/pointerstructure v1.0.0/go.mod h1:k4XwG94++jLVsSiTxo7qdIfXA9pj9EAeo0QsNNJOLZ8= +github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod 
h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/monopole/mdrip v1.0.0/go.mod h1:N1/ppRG9CaPeUKAUHZ3dUlfOT81lTpKZLkyhCvTETwM= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20200220173314-aae45faa4006/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= +github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod 
h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nirarg/terraform-provider-kubevirt v0.0.0-20201222125919-101cee051ed3/go.mod h1:FMugN9a6XOJm9mLFEV/+F4IJzdZmpLn/OaNRa8S/Ens= +github.com/nishanths/exhaustive v0.0.0-20200811152831-6cf413ae40e0/go.mod h1:wBEpHwM2OdmeNpdCvRPUlkEbBuaFmcK4Wv8Q7FuGW3c= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-cluster-management/api v0.0.0-20210511122802-f38973154cbd/go.mod h1:ot+A1DWq+v1IV+e1S7nhIteYAmNByFgtazvzpoeAfRQ= +github.com/open-cluster-management/multicloud-operators-channel v1.0.1-0.20200604182604-841a36e63aa9/go.mod h1:IIME1guAHnyD0E6/Z6H8UjPflJvdAkNUoAw7cuTkbaY= +github.com/open-cluster-management/multicloud-operators-deployable v0.0.0-20200603180154-d1d17d718c30/go.mod h1:ysimCqQtXj9F+LPHKRZoFkN/d/pXYfZTsF5bqkdEaNs= +github.com/open-cluster-management/multicloud-operators-placementrule v1.0.1-2020-05-28-18-29-00.0.20200603172904-efde26079087/go.mod h1:871ea21VnKsCByS6u8fHXXTW94PlIWdQKw5bf/amBr8= +github.com/open-cluster-management/multicloud-operators-subscription v1.0.0-2020-05-12-21-17-19.0.20200610014526-1e0e8c0acfad/go.mod h1:CFx6SlVQtyvlYUK4KEBCAXXVwEk2tTBuJsyb+LfU89g= +github.com/open-cluster-management/multicloud-operators-subscription-release v1.0.1-2020-05-28-18-29-00.0.20200603160156-4d66bd136ba3/go.mod h1:UTzlywmLkZVU8kckKskqmXhAO/JB0OXQHk1+DvEDBeM= 
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/openshift-metal3/terraform-provider-ironic v0.2.3/go.mod h1:ux2W6gsCIYsY/fX5N0V0ZgwFEBNN7P8g6RlH6ACi97k= +github.com/openshift-metal3/terraform-provider-ironic v0.2.4/go.mod h1:ux2W6gsCIYsY/fX5N0V0ZgwFEBNN7P8g6RlH6ACi97k= +github.com/openshift/api v0.0.0-20210331193751-3acddb19d360 h1:EGWKZ4foeELg9R+0OaLXKUoqHmtUwAMq0fCBUirbKwY= +github.com/openshift/api v0.0.0-20210331193751-3acddb19d360/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= +github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe/go.mod h1:DOgBIuBcXuTD8uub0jL7h6gBdIBt3CFrwz6K2FtfMBA= +github.com/openshift/build-machinery-go v0.0.0-20200713135615-1f43d26dccc7/go.mod 
h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20210115170933-e575b44a7a94/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 h1:7NmjUkJtGHpMTE/n8ia6itbCdZ7eYuTCXKc/zsA7OSM= +github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8= +github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e/go.mod h1:iPn+uhIe7nkP5BMHe2QnbLtg5m/AIQ1xvz9s3cig5ss= +github.com/openshift/cluster-api v0.0.0-20190805113604-f8de78af80fc/go.mod h1:mNsD1dsD4T57kV4/C6zTHke/Ro166xgnyyRZqkamiEU= +github.com/openshift/cluster-api v0.0.0-20191030113141-9a3a7bbe9258/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= +github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= +github.com/openshift/cluster-api-actuator-pkg v0.0.0-20190614215203-42228d06a2ca/go.mod h1:KNPaA64x3Ok7z538kvS2acwC5fEwvPfF0RdTx2geQEE= +github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837/go.mod h1:aXOt4gMtzXQxymPRm98vJAVmGjDhcTXsrQHauiNJK3o= +github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b/go.mod h1:LPNjFna6F+ePHaXM/7QIyCF0sLsEtfuN16yY9sFZJ40= +github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d/go.mod h1:S+wtA0Rm2FZ5ccC9zNQXUWUDesR6Jsdn5eb6HjAR+Gs= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156/go.mod h1:KCyjaBfEkifs9bqV1HEXDJUyQylgeLSqiqt2QnMn7is= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200701112720-3a7d727c9a10/go.mod h1:wgkZrOlcIMWTzo8khB4Js2PoDJDlIUUdzCBm7BuDdqw= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200713133651-5c8a640669ac/go.mod h1:XVYX9JE339nKbDDa/W481XD+1GTeqeaBm8bDPr7WE7I= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200901173901-9056dbc8c9b9/go.mod h1:rcwAydGZX+z4l91wtOdbq+fqDwuo6iu0YuFik3UUc+8= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570/go.mod h1:7NRECVE26rvP1/fs1CbhfY5gsgnnFQNhb9txTFzWmUw= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201027164920-70f2f92e64ab/go.mod h1:S38HjVtBmaX6PHq99updVereupkHcwcOEM5jq6rTILI= +github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201203141909-4dc702fd57a5/go.mod h1:/XjFaKnqBc8K/jcRXHO7tau39CmzNinqmpxYaQGRvnE= +github.com/openshift/cluster-api-provider-kubevirt v0.0.0-20201214114543-e5aed9c73f1f/go.mod h1:Moiq8vUJ4IdTaJBxIA756FFJ4GgVXZAiOds7lTpZ1kQ= +github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603/go.mod h1:7pQ9Bzha+ug/5zd+0ufbDEcnn2OnNlPwRwYrzhXk4NM= +github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35b2e38e/go.mod h1:1DDDZ7uXsauiUvCDUxq6XmsToaTh9WipPoW1qASkL9c= +github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43/go.mod 
h1:Vl/bvZulLw6PdUADIFWGfoTWH1O4L1B80eN7BtLYEuo= +github.com/openshift/cluster-autoscaler-operator v0.0.0-20190521201101-62768a6ba480/go.mod h1:/XmV44Fh28Vo3Ye93qFrxAbcFJ/Uy+7LPD+jGjmfJYc= +github.com/openshift/cluster-monitoring-operator v0.1.1-0.20210611103744-7168290cd660 h1:Uu4FRbRt8SvN85H+HXshPcDYepPnVp4Ju9VZZFxcDhU= +github.com/openshift/cluster-monitoring-operator v0.1.1-0.20210611103744-7168290cd660/go.mod h1:0/kDYY2vkaFz/O1ZpFyzMKCULRIt5Bg9SBN2+iYZ2qs= +github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible/go.mod h1:0BbpR1mrN0F2ZRae5N1XHcytmkvVPaeKgSQwRRBWugc= +github.com/openshift/custom-resource-status v0.0.0-20190822192428-e62f2f3b79f3/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= +github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= +github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480/go.mod h1:OAHL5WnZphlhVEf5fTdeGLvNwMu1B2zCWpmxJpCA35o= +github.com/openshift/hive v1.0.18-0.20210129211840-21bce609f1f4/go.mod h1:Hv5v6eoEY4SrYZE2TWA7euqY5ldyxg+YH6pR9RzUv8I= +github.com/openshift/hive v1.1.16/go.mod h1:QJY97wHcEv7LTCB5tStmo9JT6E2LHSF8m73fFVz/Aj8= +github.com/openshift/hive/apis v0.0.0-20210802140536-4d8d83dcd464/go.mod h1:77ODrnaHiDlfbqQgvk5nUWuqf2AsGY/99QlfNTiqHwI= +github.com/openshift/installer v0.9.0-master.0.20201103204150-888dc5bab60c/go.mod h1:FsTPqP4aUu5/bcxbtRyWFF7n/YXR/d/VRe7+hTNldGA= +github.com/openshift/installer v0.9.0-master.0.20210211002944-d237b9dee575/go.mod h1:SWnBsRHNJNZCUJUMyyWbxljcePsR/hCUEY38GwhSIA8= +github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20210302135122-481bd04dbc78 h1:BxSKP/SI7A42pPCJfOM1pj5uU5A8L5mCz3Vyer2l88U= +github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20210302135122-481bd04dbc78/go.mod h1:Ld7NVItdAwI99lCHbys/n88rIMQjeS7PyXA4NL4yImM= +github.com/openshift/library-go v0.0.0-20200918101923-1e4c94603efe h1:MJqGN0NVONnTLDYPVIEH4uo6i3gAK7LAkhLnyFO8Je0= +github.com/openshift/library-go v0.0.0-20200918101923-1e4c94603efe/go.mod h1:NI6xOQGuTnLXeHW8Z2glKSFhF7X+YxlAlqlBMaK0zEM= +github.com/openshift/machine-api-operator v0.0.0-20190312153711-9650e16c9880/go.mod h1:7HeAh0v04zQn1L+4ItUjvpBQYsm2Nf81WaZLiXTcnkc= +github.com/openshift/machine-api-operator v0.2.1-0.20191128180243-986b771e661d/go.mod h1:9qQPF00anuIsc6RiHYfHE0+cZZImbvFNLln0NRBVVMg= +github.com/openshift/machine-api-operator v0.2.1-0.20200402110321-4f3602b96da3/go.mod h1:46g2eLjzAcaNURYDvhGu0GhyjKzOlCPqixEo68lFBLs= +github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290/go.mod h1:QkhH+/6BXabl+4HmiLwx9/bmW1ieCGF9km7xz22Ozl0= +github.com/openshift/machine-api-operator v0.2.1-0.20200611014855-9a69f85c32dd/go.mod h1:6vMi+R3xqznBdq5rgeal9N3ak3sOpy50t0fdRCcQXjE= +github.com/openshift/machine-api-operator v0.2.1-0.20200701225707-950912b03628/go.mod h1:cxjy/RUzv5C2T5FNl1KKXUgtakWsezWQ642B/CD9VQA= +github.com/openshift/machine-api-operator v0.2.1-0.20200722104429-f4f9b84df9b7/go.mod h1:XDsNRAVEJtkI00e51SAZ/PnqNJl1zv0rHXSdl9L1oOY= +github.com/openshift/machine-api-operator v0.2.1-0.20200926044412-b7d860f8074c/go.mod h1:cp/wPVzxHZeLUjOLkNPNqrk4wyyW6HuHd3Kz9+hl5xw= +github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597/go.mod h1:+oAfoCl+TUd2TM79/6NdqLpFUHIJpmqkKdmiHe2O7mw= +github.com/openshift/machine-api-operator v0.2.1-0.20201111151924-77300d0c997a/go.mod h1:XQN83eD5YoXEkla3di+exKIpLYx/ApLAOe0EE66Q+hw= 
+github.com/openshift/machine-config-operator v0.0.0/go.mod h1:4IzikyGmUVQwlohScKeaAr5n2YzcWXkZvTMGGxDcU2Q= +github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= +github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= +github.com/openshift/terraform-provider-aws v1.60.1-0.20200630224953-76d1fb4e5699/go.mod h1:0U3OgA2uDYSc7gNkdWA92+/BxWXwuYhWqqZ4UhM1RCw= +github.com/openshift/terraform-provider-azurerm v1.40.1-0.20200707062554-97ea089cc12a/go.mod h1:9VGDn8x+Pz/j5vQ8nseuH+YsKyxpGYx+faT9b9fqCWQ= +github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785/go.mod h1:C+iumr2ni468+1jvcHXLCdqP9uQnoQbdX93F3aWahWU= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/operator-framework/api v0.1.1/go.mod h1:yzNYR7qyJqRGOOp+bT6Z/iYSbSPNxeh3Si93Gx/3OBY= +github.com/operator-framework/api v0.3.4/go.mod h1:TmRmw+8XOUaDPq6SP9gA8cIexNf/Pq8LMFY7YaKQFTs= +github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/operator-lifecycle-manager v0.0.0-20200321030439-57b580e57e88/go.mod h1:7Ut8p9jJ8C6RZyyhZfZypmlibCIJwK5Wcc+WZDgLkOA= +github.com/operator-framework/operator-registry v1.5.3/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= +github.com/operator-framework/operator-registry v1.6.1/go.mod h1:sx4wWMiZtYhlUiaKscg3QQUPPM/c1bkrAs4n4KipDb4= +github.com/operator-framework/operator-registry v1.6.2-0.20200330184612-11867930adb5/go.mod h1:SHff373z8asEkPo6aWpN0qId4Y/feQTjZxRF8PRhti8= +github.com/operator-framework/operator-registry v1.12.1/go.mod h1:rf4b/h77GUv1+geiej2KzGRQr8iBLF4dXNwr5AuGkrQ= +github.com/operator-framework/operator-registry v1.12.4/go.mod h1:JChIivJVLE1wRbgIhDFzYQYT9yosa2wd6qiTyMuG5mg= +github.com/operator-framework/operator-sdk 
v0.5.1-0.20190301204940-c2efe6f74e7b/go.mod h1:iVyukRkam5JZa8AnjYf+/G3rk7JI1+M6GsU0sq0B9NA= +github.com/operator-framework/operator-sdk v0.17.0/go.mod h1:wmYi08aoUmtgfoUamURmssI4dkdFGNtSI1Egj+ZfBnk= +github.com/operator-framework/operator-sdk v0.18.0/go.mod h1:xP/DNvnYnIoGK1bLKiD0s7aYZp2fa4AI6t1v3INaoZg= +github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc= +github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= +github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw= +github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/ovirt/go-ovirt v0.0.0-20200313072907-d30f754823a6/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= +github.com/ovirt/go-ovirt v0.0.0-20200428093010-9bcc4fd4e6c0/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= +github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= +github.com/ovirt/go-ovirt v0.0.0-20210112072624-e4d3b104de71/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= +github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200914080915-c4444fb5c201/go.mod h1:XFDLN/srNA1s2Dq+gp4zBvql6nRnfNJzDGzI5vtK85g= +github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20210118101701-cc657a8c6634/go.mod h1:LDHfgu36xGyr0tUPZpL+a7HRovpRzlcNiu0CmPcxcUI= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= +github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml 
v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prometheus-community/prom-label-proxy v0.2.0/go.mod 
h1:XdjyZg7LCbCC5FADHtpgNp6kQ0W9beXVGfmcvndMj5Y= +github.com/prometheus-community/prom-label-proxy v0.3.0 h1:zUPx5+uJAcQnN69viABKI25j6OIndeOZmk23PzqyDcE= +github.com/prometheus-community/prom-label-proxy v0.3.0/go.mod h1:jutqaL/NXY/RHaF+nU/pkkIKKSX6ATuLrMNlTO4A4WU= +github.com/prometheus-operator/prometheus-operator v0.47.1 h1:RSfCvN/seBEftFG67WTKXhQgt6ffEg1HspUyLFMY6+4= +github.com/prometheus-operator/prometheus-operator v0.47.1/go.mod h1:frnWN4rQxrDagrzyhIOtnzm7t5aXeXNcXpypSW7g8oQ= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.47.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.1 h1:OGC7+ktZ6h8xI99VB6i8iuiXecdhUmwto9vbGzoVMac= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.48.1/go.mod h1:3WYi4xqXxGGXWDdQIITnLNmuDzO5n6wYva9spVhR4fg= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.47.1 h1:/skMcN9Dpl82Hm0UKzoteUSMbIn8nVyCk9IbjVIuS+I= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.47.1/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA= +github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= +github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= +github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= +github.com/prometheus/alertmanager v0.22.2 h1:JrDZalSEMb2/2bqGAhls6ZnvOxbC5jMIu29JV+uWTC0= +github.com/prometheus/alertmanager v0.22.2/go.mod h1:rYinOWxFuCnNssc3iOjn2oMTlhLaPcUuqV5yk5JKUAE= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= 
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.1.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= 
+github.com/prometheus/common v0.20.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= +github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190227231451-bbced9601137/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/prometheus v1.8.2-0.20210811141203-dcb07e8eac34 h1:I18LpDzCaeu8P9UbJtssh69dnvYm8Pb5CEOe38sSTo4= +github.com/prometheus/prometheus v1.8.2-0.20210811141203-dcb07e8eac34/go.mod h1:ZHczEifRAgXT0ypud2xADA4wVWymlQeZiPAzEvTNDas= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= 
+github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= +github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rickb777/date v1.12.5-0.20200422084442-6300e543c4d9/go.mod h1:L8WrssTzvgYw34/Ppa0JpJfI7KKXZ2cVGI6Djt0brUU= +github.com/rickb777/plural v1.2.0/go.mod h1:UdpyWFCGbo3mvK3f/PfZOAOrkjzJlYN/sD46XNWJ+Es= +github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= +github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.0.4/go.mod 
h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= +github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/uuid v0.0.0-20160927100844-b061729afc07/go.mod h1:B8HLsPLik/YNn6KKWVMDJ8nzCL8RP5WyfsnmvnAEwIU= +github.com/satori/uuid v1.2.0/go.mod h1:B8HLsPLik/YNn6KKWVMDJ8nzCL8RP5WyfsnmvnAEwIU= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= +github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/securego/gosec v0.0.0-20190912120752-140048b2a218/go.mod h1:q6oYAujd2qyeU4cJqIri4LBIgdHXGvxWHZ1E29HNFRE= +github.com/securego/gosec v0.0.0-20200316084457-7da9f46445fd/go.mod h1:NurAFZsWJAEZjogSwdVPlHkOZB3DOAU7gsPP8VFZCHc= +github.com/securego/gosec/v2 v2.4.0/go.mod h1:0/Q4cjmlFDfDUj1+Fib61sc+U5IQb2w+Iv9/C3wPVko= +github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= 
+github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= +github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.20.10/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name 
v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180725160413-e900ae048470/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod 
h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/sourcegraph/go-diff v0.6.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.3.5/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.0.2/go.mod h1:eLfe5bL3qbL7ep/KafHzthxejrOF5J3xmt03uL5tzek= +github.com/stoewer/go-strcase v1.1.0/go.mod h1:G7YglbHPK5jX3JcWljxVXRXPh90/dtxfy6xWqxu5b90= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stolostron/multicloud-operators-foundation v0.0.0-20220112005209-c7d642306a99 h1:VKw5JiV28Q1kB4GH34zzJvLGR4qQQt+XT8EYVTULSjQ= +github.com/stolostron/multicloud-operators-foundation v0.0.0-20220112005209-c7d642306a99/go.mod h1:OGgYyIj05jrIlLYF+oczqidvHLKPEdum/fN0aLBjPhQ= +github.com/stolostron/multiclusterhub-operator v0.0.0-20220111203209-4882a2b93f0f h1:HJ286o7ylkYi3vhycfxz17yZQ/GZtCbihaZogGavDbo= +github.com/stolostron/multiclusterhub-operator v0.0.0-20220111203209-4882a2b93f0f/go.mod h1:iKow3tj3j8B4al2v4agj/ePBVVh8phRrPDj/MkWqT9c= +github.com/stolostron/observatorium-operator v0.0.0-20220112075017-39be85036799 h1:eQz6z2yAT4guRBUxwHaYxSeWB2yFhMgLHOHPjZih2rs= +github.com/stolostron/observatorium-operator v0.0.0-20220112075017-39be85036799/go.mod h1:3TfJn5Ot5u8q2aSag1tw3vlqlM6U4Gut3ktydX1ZoBQ= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= +github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= +github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c/go.mod h1:wk2XFUg6egk4tSDNZtXeKfe2G6690UVyt163PuUxBZk= +github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= +github.com/terraform-provider-openstack/terraform-provider-openstack v1.32.0/go.mod h1:Xm/accuOkyS8NkNp9HwqNMPu5rAFnHP7g/2uNRZbO8c= +github.com/terraform-provider-openstack/terraform-provider-openstack v1.33.0/go.mod h1:NA2Iaq+p8yIzeHAY9DHEedL/SqrT0AInYP9GTqVLe1k= +github.com/terraform-providers/terraform-provider-azuread v0.9.0/go.mod h1:sSDzB/8CD639+yWo5lZf+NJvGSYQBSS6z+GoET9IrzE= +github.com/terraform-providers/terraform-provider-google v1.20.1-0.20200623174414-27107f2ee160/go.mod h1:QxehqxV8Swl+O2JXJUdS6orHYJXWUEr4HFfYH5JV9ew= +github.com/terraform-providers/terraform-provider-ignition v1.2.1/go.mod h1:tUlGVBhkz+z79iffnt7vKISS199MdPd85+l6SNpoS/s= +github.com/terraform-providers/terraform-provider-local v1.4.0/go.mod h1:nbnWkAjiiG0FHlsfYYMRfBwvDbo6eLjorQs/mmRGi14= +github.com/terraform-providers/terraform-provider-null v1.0.1-0.20191204185112-e5c592237f62/go.mod h1:RaAgicYv+oKLyZpaQB5BOkwL/t5WKYHQ+Q0kgMgXgR4= +github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew= +github.com/terraform-providers/terraform-provider-random v0.0.0-20190925200408-30dac3233094/go.mod h1:F4KE9YftuJyMiBth4W1kCrsyOHndtTjAmZ+ZzjqWY+4= +github.com/terraform-providers/terraform-provider-random v1.3.2-0.20190925210718-83518d96ae4f/go.mod h1:F4KE9YftuJyMiBth4W1kCrsyOHndtTjAmZ+ZzjqWY+4= 
+github.com/terraform-providers/terraform-provider-template v1.0.0/go.mod h1:/J+B8me5DCMa0rEBH5ic2aKPjhtpWNeScmxFJWxB1EU= +github.com/terraform-providers/terraform-provider-vsphere v1.16.2/go.mod h1:yTPDOvhy5A/PX5z/sKq0FthWz1m38QKRW+P+SJDYbeo= +github.com/tetafro/godot v0.3.3/go.mod h1:pT6/T8+h6//L/LwQcFc4C0xpfy1euZwzS1sHdrFCms0= +github.com/tetafro/godot v0.4.8/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= +github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= +github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= +github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc= +github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg= +github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a/go.mod h1:A3qUEEbsVkplJnxyDLwuIuvTDaJPByTH+hMdTl9ujAA= +github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ= +github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51/go.mod h1:kPvI4H0AynFiHDN95ZB28/k70ZPGCx+pBrRh6RZPimw= +github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDGYRNkgM+FCwYNOD+6tOV+DE2fpjzfV6iqXyOgFIw= +github.com/thanos-io/thanos v0.13.1-0.20210224074000-659446cab117/go.mod h1:kdqFpzdkveIKpNNECVJd75RPvgsAifQgJymwCdfev1w= +github.com/thanos-io/thanos v0.19.0/go.mod h1:+mXfPepU1VrKw/fMfG2LIKF0NunOTZll452nGEFALhw= +github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tombuildsstuff/giovanni v0.10.0/go.mod h1:WwPhFP2+WnhJzvPYDnsyBab2wOIksMX6xm+Tg+jVvKw= +github.com/tombuildsstuff/giovanni v0.12.0/go.mod h1:qJ5dpiYWkRsuOSXO8wHbee7+wElkLNfWVolcf59N84E= +github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= +github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= +github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod 
h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4= +github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.3/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/fasthttp v1.15.1/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod 
h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/quicktemplate v1.6.2/go.mod h1:mtEJpQtUiBV0SHhMX6RtiJtqxncgrfmjcUy5T68X8TM= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/vmware/govmomi v0.22.1/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= +github.com/vmware/govmomi v0.22.2/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= +github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= +github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= +github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= +github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= +github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= +github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= +github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/weppos/publicsuffix-go v0.13.0 h1:0Tu1uzLBd1jPn4k6OnMmOPZH/l/9bj9kUOMMkoRs6Gg= +github.com/weppos/publicsuffix-go v0.13.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/willf/bitset v1.1.9/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= +github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/go-metrics 
v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.1.1/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= +github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= +github.com/zclconf/go-cty v1.6.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o= +github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= +github.com/zclconf/go-cty-yaml v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20201128221613-3719af1573cf h1:LEJcSlvjRUl6T7E0+mvKFxS61NsP1Z/+5jvHr4JQVVU= +github.com/zmap/zcrypto v0.0.0-20201128221613-3719af1573cf/go.mod h1:aPM7r+JOkfL+9qSB4KbYjtoEzJqUK50EXkkJabeNJDQ= +github.com/zmap/zlint/v3 v3.0.0 h1:mCCruybkWbiO8KlCXDqR48YOv+CTZyq9U8cOvXjfayU= +github.com/zmap/zlint/v3 v3.0.0/go.mod h1:paGwFySdHIBEMJ61YjoqT4h7Ge+fdYG4sUQhnTb1lJ8= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= +go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= +go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= +go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= +go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5/go.mod h1:N0RPWo9FXJYZQI4BTkDtQylrstIigYHeR18ONnyTufk= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd 
v0.5.0-alpha.5.0.20200520232829-54ba9589114f/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod 
h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20191010144846-132d2879e1e9/go.mod 
h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191029154019-8994fa331a53/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200821190819-94841d0725da/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod 
v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= 
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190319182350-c85d3e98c914/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210210192628-66670185b0cd/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190509141414-a5b02f93d862/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190621203818-d432491b9138/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190911201528-7ad0cfa0b7b5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190927073244-c990c680b611/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191110163157-d32e6e3b99c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6rNY2pbRfbIMVnepueo= +golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod 
h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190718200317-82a3ea8a504c/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911230505-6bfd74cf029c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190912215617-3720d1ec3678/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191204011308-9611592c72f6/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200214201135-548b770e2dfa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200228224639-71482053b885/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200321224714-0d839f3cf2ed/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200409170454-77362c5149f0/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200430192856-2840dafb9ee1/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200519015757-0d0afa43d58a/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200701041122-1837592efa10/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200817023811-d00afeaade8f/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200827163409-021d7c6f1ec3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200911153331-7ad463ce66dd/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201020123448-f5c826d1900e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= +gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/gonum v0.0.0-20190915125329-975d99cd20a9/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.23.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.33.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.4/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190513181449-d00d292a067c/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191203220235-3fa9dbf08042/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200310143817-43be25429f5a/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200507105951-43844f6eee31/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200721032028-5044d0edf986/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200827165113-ac2560b5e952/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200911024640-645f7a48b24f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea h1:8ZyCcgugUqamxp/vZSEJw9CMy7VZlSWYJLLJPi/dSDA= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/AlecAivazis/survey.v1 v1.8.9-0.20200217094205-6773bdf39b7f/go.mod h1:CaHjv79TCgAvXMSFJSVgonHXYWxnhzI3eoHtnX5UgUo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= +gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= +gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20190502103701-55513cacd4ae/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +helm.sh/helm/v3 v3.1.0/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= +helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= +helm.sh/helm/v3 v3.2.0/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= 
+howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= +k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= +k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= +k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= +k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= +k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ= +k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= +k8s.io/apiserver v0.18.0-rc.1/go.mod h1:RYE9w2Lijk1aWW3i3pS7kFGU0Afof+UDoOz1qW9aSYg= +k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= +k8s.io/apiserver v0.19.1/go.mod h1:iRxYIjA0X2XEyoW8KslN4gDhasfH4bWcjj6ckVeZX28= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= +k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= +k8s.io/apiserver v0.22.1 h1:Ul9Iv8OMB2s45h2tl5XWPpAZo1VPIJ/6N+MESeed7L8= +k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400= +k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= +k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= +k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= +k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc= +k8s.io/cli-runtime v0.18.0-rc.1/go.mod h1:yuKZYDG8raONmwjwIkT77lCfIuPwX+Bsp88MKYf1TlU= +k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ= +k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ= +k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= +k8s.io/cli-runtime v0.19.1/go.mod h1:X6g8e4NBiG8GMsKewXsRpo36MO6xrvXa+0wCg7zO4aU= +k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s= +k8s.io/cli-runtime v0.21.2/go.mod h1:8u/jFcM0QpoI28f6sfrAAIslLCXUYKD5SsPPMWiHYrI= +k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/cluster-bootstrap v0.0.0-20190202014938-c9acc0c1bea2/go.mod h1:iBSm2nwo3OaiuW8VDvc3ySDXK5SKfUrxwPvBloKG7zg= +k8s.io/cluster-registry v0.0.6/go.mod h1:/F+o1rvzjBdLbg782rR8eKrOb20hPy7us+Zu/pjBtAY= +k8s.io/code-generator v0.0.0-20181114232248-ae218e241252/go.mod h1:IPqxl/YHk05nodzupwjke6ctMjyNRdV2zZ5/j3/F204= +k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb/go.mod h1:cDx5jQmWH25Ff74daM7NVYty9JWw9dvIS9zT9eIubCY= +k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod 
h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= +k8s.io/code-generator v0.0.0-20191003035328-700b1226c0bd/go.mod h1:HC9p4y3SBN+txSs8x57qmNPXFZ/CxdCHiDTNnocCSEw= +k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= +k8s.io/code-generator v0.17.4/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= +k8s.io/code-generator v0.18.0-rc.1/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.19.1/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/code-generator v0.21.0-rc.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= +k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= +k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc= +k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= +k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= +k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= +k8s.io/component-base v0.18.0-rc.1/go.mod h1:NNlRaxZEdLqTs2+6yXiU2SHl8gKsbcy19Ii+Sfq53RM= +k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= +k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= +k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= +k8s.io/component-base v0.19.1/go.mod h1:b0vDKYa8EdJJ8dHUA6fGPj4z8taqGks5mfZvp3p/jVo= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= +k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= +k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= +k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo= +k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= +k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk= +k8s.io/component-helpers v0.21.2/go.mod h1:DbyFt/A0p6Cv+R5+QOGSJ5f5t4xDfI8Yb89a57DgJlQ= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod 
h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190907103519-ebc107f98eab/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/helm v2.16.3+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= +k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE= +k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-aggregator v0.0.0-20190404125450-f5e124c822d6/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= +k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA= +k8s.io/kube-aggregator v0.18.0-rc.1/go.mod h1:35N7x/aAF8C5rEU78J+3pJ/k9v/8LypeWbzqBAEWA1I= +k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= +k8s.io/kube-aggregator v0.19.1/go.mod h1:oAj1kWeSDCh7sdzUOs6XXPn/jbzJY+yGGxDd0QyLJC8= +k8s.io/kube-aggregator v0.20.0/go.mod h1:3Is/gzzWmhhG/rA3CpA1+eVye87lreBQDFGcAGT7gzo= +k8s.io/kube-aggregator v0.21.2/go.mod h1:7NgmUXJziySAJ7GxMRBBwcJay7MLUoxms31fw/ICpYk= +k8s.io/kube-aggregator v0.22.1 h1:hsntyWsnkLiL4ccmoKfqiUVyxnlnqtqPRMuq/mT2wGQ= +k8s.io/kube-aggregator v0.22.1/go.mod h1:VbmI+8fUeCPkzSvarWTrlIGEgUGEGI/66SFajDQ0Pdc= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= 
+k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= +k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= +k8s.io/kubectl v0.17.3/go.mod h1:NUn4IBY7f7yCMwSop2HCXlw/MVYP4HJBiUmOR3n9w28= +k8s.io/kubectl v0.17.4/go.mod h1:im5QWmh6fvtmJkkNm4HToLe8z9aM3jihYK5X/wOybcY= +k8s.io/kubectl v0.18.0-rc.1/go.mod h1:UpG1w7klD633nyMS73/29cNl2tMdEbXU0nWupttyha4= +k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU= +k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4= +k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= +k8s.io/kubectl v0.19.1/go.mod h1:jZM7qucrDpQu05OAoSJk0yRRHRZNydya40dILYh8ODc= +k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M= +k8s.io/kubectl v0.21.2 h1:9XPCetvOMDqrIZZXb1Ei+g8t6KrIp9ENJaysQjUuLiE= +k8s.io/kubectl v0.21.2/go.mod h1:PgeUclpG8VVmmQIl8zpLar3IQEpFc9mrmvlwY3CK1xo= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= +k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= +k8s.io/metrics v0.17.4/go.mod h1:6rylW2iD3M9VppnEAAtJASY1XS8Pt9tcYh+tHxBeV3I= +k8s.io/metrics v0.18.0-rc.1/go.mod h1:ME3EkXCyiZ7mVFEiAYKBfuo3JkpgggeATG+DBUQby5o= +k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4= +k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg= +k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= +k8s.io/metrics v0.19.1/go.mod h1:O/ONCgXDITtJuMveKEDwZSfiqHOiMZTWmyLe/p1BoAA= +k8s.io/metrics v0.19.4/go.mod h1:a0gvAzrxQPw2ouBqnXI7X9qlggpPkKAFgWU/Py+KZiU= +k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= +k8s.io/metrics v0.21.2/go.mod h1:wzlOINZMCtWq8dR9gHlyaOemmYlOpAoldEIXE82gAhI= +k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190529001817-6999998975a7/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190923111123-69764acb6e8e/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200411171748-3d5a2fe318e4/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200821003339-5e75c0163111/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210305010621-2afb4311ab10/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176 h1:Mx0aa+SUAcNRQbs5jUzV8lkDlGFU8laZsY9jrcVX5SY= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +kubevirt.io/client-go v0.29.0/go.mod h1:JY7hQq+SUT0aLvleXrW/+28fDfZ6BPe4E6f8FyC8jkY= +kubevirt.io/containerized-data-importer v1.10.6/go.mod h1:qF594BtRRkruyrqLwt3zbLCWdPIQNs1qWh4LR1cOzy0= +kubevirt.io/containerized-data-importer v1.10.9/go.mod h1:qF594BtRRkruyrqLwt3zbLCWdPIQNs1qWh4LR1cOzy0= +launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= +layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/gofumpt v0.0.0-20200709182408-4fd085cb6d5f/go.mod h1:9VQ397fNXEnF84t90W4r4TRCQK+pg9f8ugVfyj+S26w= +mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d/go.mod h1:bzrjFmaD6+xqohD3KYP0H2FEuxknnBmyyOxdhLdaIws= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= +mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= +mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= +open-cluster-management.io/addon-framework v0.0.0-20210909134218-e6e993872bb1 h1:VEQavVubWOg7nlsNBjBAv5RdfuEaJh5oIqxUtSh/7oU= +open-cluster-management.io/addon-framework v0.0.0-20210909134218-e6e993872bb1/go.mod h1:upMV+97asubQnQ1WY3W67nd4pIReZNR4beVkABXFjE0= +open-cluster-management.io/api v0.0.0-20210607023841-cd164385e2bb/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE= +open-cluster-management.io/api v0.0.0-20210916013819-2e58cdb938f9 h1:ySrjJFbSuPbHEN0OvzTeQO8Bt93rjgvbce7lo2cQeZY= +open-cluster-management.io/api v0.0.0-20210916013819-2e58cdb938f9/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.0.0-20190520212815-96b67f231945/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= +sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= +sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4= +sigs.k8s.io/controller-runtime v0.3.1-0.20191016212439-2df793d02076/go.mod h1:p2vzQ3RuSVv9YR4AcM0y8TKHQA+0oLXazKFt6Z0OdS8= +sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= +sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= +sigs.k8s.io/controller-runtime v0.5.1-0.20200330174416-a11a908d91e0/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= +sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= +sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= +sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= +sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= +sigs.k8s.io/controller-runtime v0.9.3-0.20210709165254-650ea59f19cc/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= +sigs.k8s.io/controller-runtime v0.9.7 h1:DlHMlAyLpgEITVvNsuZqMbf8/sJl9HirmCZIeR5H9mQ= +sigs.k8s.io/controller-runtime v0.9.7/go.mod h1:nExcHcQ2zvLMeoO9K7rOesGCmgu32srN5SENvpAEbGA= +sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= +sigs.k8s.io/controller-tools v0.2.2-0.20190930215132-4752ed2de7d2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= +sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= +sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= +sigs.k8s.io/controller-tools v0.2.9-0.20200331153640-3c5446d407dd/go.mod h1:D2LzYpGDYjxaAALDVYAwaqaKp2fNuyO5yfOBoU/cbBE= +sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= +sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185/go.mod h1:JuPG+FXjAeZL7eGmTuXUJduEMlI2/kGqb0rUGlVi+Yo= +sigs.k8s.io/controller-tools v0.4.0/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= +sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= +sigs.k8s.io/controller-tools v0.5.0/go.mod h1:JTsstrMpxs+9BUj6eGuAaEb6SDSPTeVtUyp0jmnAM/I= +sigs.k8s.io/kubebuilder v1.0.9-0.20200513134826-f07a0146a40b/go.mod h1:FGPx0hvP73+bapzWoy5ePuhAJYgJjrFbPxgvWyortM0= +sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= +sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= +sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0= +sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo= +sigs.k8s.io/kustomize/kyaml v0.10.17 
h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg= +sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= +sigs.k8s.io/kustomize/pluginator v1.0.0/go.mod h1:i8HdU5FdH1zDjCKiFf5CNl7slsc0QffyKsY2OuPynJ0= +sigs.k8s.io/kustomize/v3 v3.2.0/go.mod h1:ztX4zYc/QIww3gSripwF7TBOarBTm5BvyAMem0kCzOE= +sigs.k8s.io/kustomize/v3 v3.3.1 h1:UOhJqkRINRODnKq24DoDAr4gxk2z2p9iFJWDT3OLBx8= +sigs.k8s.io/kustomize/v3 v3.3.1/go.mod h1:2ojB+51Z+YIBpEOknAFX3U8f0XXa94PFcfXPccDxAfg= +sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= +sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 000000000..45dbbbbcf --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
\ No newline at end of file
diff --git a/loaders/dashboards/Dockerfile b/loaders/dashboards/Dockerfile
new file mode 100644
index 000000000..ce7e3dec8
--- /dev/null
+++ b/loaders/dashboards/Dockerfile
@@ -0,0 +1,52 @@
+# Copyright Contributors to the Open Cluster Management project
+
+FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder
+
+WORKDIR /workspace
+COPY go.sum go.mod ./loaders/dashboards ./
+COPY ./loaders/dashboards ./loaders/dashboards
+
+RUN CGO_ENABLED=0 go build -a -installsuffix cgo -v -i -o main loaders/dashboards/cmd/main.go
+
+FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
+
+ARG VCS_REF
+ARG VCS_URL
+ARG IMAGE_NAME
+ARG IMAGE_DESCRIPTION
+ARG IMAGE_DISPLAY_NAME
+ARG IMAGE_NAME_ARCH
+ARG IMAGE_MAINTAINER
+ARG IMAGE_VENDOR
+ARG IMAGE_VERSION
+ARG IMAGE_RELEASE
+ARG IMAGE_SUMMARY
+ARG IMAGE_OPENSHIFT_TAGS
+
+LABEL org.label-schema.vendor="Red Hat" \
+    org.label-schema.name="$IMAGE_NAME_ARCH" \
+    org.label-schema.description="$IMAGE_DESCRIPTION" \
+    org.label-schema.vcs-ref=$VCS_REF \
+    org.label-schema.vcs-url=$VCS_URL \
+    org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
+    org.label-schema.schema-version="1.0" \
+    name="$IMAGE_NAME" \
+    maintainer="$IMAGE_MAINTAINER" \
+    vendor="$IMAGE_VENDOR" \
+    version="$IMAGE_VERSION" \
+    release="$IMAGE_RELEASE" \
+    description="$IMAGE_DESCRIPTION" \
+    summary="$IMAGE_SUMMARY" \
+    io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
+    io.k8s.description="$IMAGE_DESCRIPTION" \
+    io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"
+
+WORKDIR /
+
+RUN microdnf update -y && microdnf clean all
+
+COPY --from=builder /workspace/main grafana-dashboard-loader
+
+EXPOSE 3002
+
+ENTRYPOINT ["/grafana-dashboard-loader"]
diff --git a/loaders/dashboards/OWNERS b/loaders/dashboards/OWNERS
new file mode 100644
index 000000000..c553cece9
--- /dev/null
+++ b/loaders/dashboards/OWNERS
@@ -0,0 +1,8 @@
+approvers:
+  - songleo
+reviewers:
+  - clyang82
+  - marcolan018
+  - bjoydeep
+  - morvencao
+  - haoqing0110
diff --git a/loaders/dashboards/README.md b/loaders/dashboards/README.md
new file mode 100644
index 000000000..a69c2e23e
--- /dev/null
+++ b/loaders/dashboards/README.md
@@ -0,0 +1,14 @@
+# grafana-dashboard-loader
+
+Sidecar proxy to load Grafana dashboards from configmaps.
+## Prerequisites
+
+- You must install [Open Cluster Management Observability](https://github.com/stolostron/multicluster-observability-operator)
+
+## How to build image
+
+```
+$ docker build -f Dockerfile.prow -t grafana-dashboard-loader:latest .
+```
+
+Now, you can use this image to replace the grafana-dashboard-loader component and verify your PRs.
diff --git a/loaders/dashboards/cmd/main.go b/loaders/dashboards/cmd/main.go
new file mode 100644
index 000000000..49aa38110
--- /dev/null
+++ b/loaders/dashboards/cmd/main.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2021 Red Hat, Inc.
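+//
+// main is the entrypoint of the grafana-dashboard-loader sidecar: it wires the
+// klog command-line flags into a pflag flag set, starts the Grafana dashboard
+// controller (loaders/dashboards/pkg/controller) that loads dashboards from
+// ConfigMaps, and then blocks until SIGTERM or SIGINT is received so the
+// controller can shut down gracefully.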
+// Copyright Contributors to the Open Cluster Management project + +package main + +import ( + "flag" + "os" + "os/signal" + "syscall" + + "github.com/spf13/pflag" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/loaders/dashboards/pkg/controller" +) + +func main() { + + klogFlags := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + klog.InitFlags(klogFlags) + flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + flagset.AddGoFlagSet(klogFlags) + + // use a channel to synchronize the finalization for a graceful shutdown + stop := make(chan struct{}) + defer close(stop) + + controller.RunGrafanaDashboardController(stop) + + // use a channel to handle OS signals to terminate and gracefully shut + // down processing + sigTerm := make(chan os.Signal, 1) + signal.Notify(sigTerm, syscall.SIGTERM) + signal.Notify(sigTerm, syscall.SIGINT) + <-sigTerm + +} diff --git a/loaders/dashboards/examples/k8s-dashboard.yaml b/loaders/dashboards/examples/k8s-dashboard.yaml new file mode 100644 index 000000000..63d93e450 --- /dev/null +++ b/loaders/dashboards/examples/k8s-dashboard.yaml @@ -0,0 +1,1833 @@ +apiVersion: v1 +data: + k8s-networking-cluster.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12124, + "graphTooltip": 0, + "id": 12, + "iteration": 1600907890286, + "links": [], + "panels": [ + { + "collapse": false, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 3, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + 
"yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "hiddenSeries": false, + "id": 4, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "align": "center" + }, + "decimals": 2, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Namespace" + }, + "properties": [ + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": null + }, + { + "id": "links", + "value": [ + { + "targetBlank": true, + "title": "Drill down to pods", + "url": "d/Bfk6ByvMk/kubernetes-compute-resources-namespace-pods-v7?var-cluster=$cluster&var-namespace=${__data.fields[namespace]}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Rate of Received Packets" + }, + "properties": [ + { + "id": "unit", + "value": "pps" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Rate of Transmitted Packets" + }, + "properties": [ + { + "id": "unit", + "value": "pps" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Rate of Received Packets Dropped" + }, + "properties": [ + { + "id": "unit", + "value": "pps" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Rate of Transmitted Packets Dropped" + }, + "properties": [ + { + 
"id": "unit", + "value": "pps" + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "interval": "4m", + "minSpan": 24, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Rate of Transmitted Packets" + } + ] + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_dropped_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "namespace" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Current Bandwidth Received", + "Value #B": "Current Bandwidth Transmitted", + "Value #C": "Average Bandwidth Received", + "Value #D": "Average Bandwidth Transmitted", + "Value #E": "Rate of Received Packets", + "Value #F": "Rate of Transmitted Packets", + "Value #G": "Rate of Received Packets Dropped", + "Value #H": "Rate of Transmitted Packets Dropped", + "namespace": "Namespace" + } + } + } + ], + "type": "table" + }, + { + "collapse": true, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 
24, + "x": 0, + "y": 19 + }, + "id": 6, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Average Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 20 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + 
"type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapse": false, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 9, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth History", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 30 + }, + "hiddenSeries": false, + "id": 10, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 39 + }, + "hiddenSeries": false, + "id": 11, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + 
"targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapse": true, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 12, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Packets", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 49 + }, + "hiddenSeries": false, + "id": 13, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 14, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + 
"max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapse": true, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 67 + }, + "id": 15, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 68 + }, + "hiddenSeries": false, + "id": 16, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } 
+ }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 77 + }, + "hiddenSeries": false, + "id": 17, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_dropped_total{namespace=~\".+\",cluster=\"$cluster\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 86 + }, + "hiddenSeries": false, + "id": 18, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "targetBlank": true, + "title": "What is TCP Retransmit?", + "url": "https://accedian.com/enterprises/blog/network-packet-loss-retransmissions-and-duplicate-acknowledgements/" + } + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(rate(node_netstat_Tcp_RetransSegs{cluster=\"$cluster\"}[$interval:$resolution]) / rate(node_netstat_Tcp_OutSegs{cluster=\"$cluster\"}[$interval:$resolution])) by (instance))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of TCP Retransmits out of all sent segments", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": 
"individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 95 + }, + "hiddenSeries": false, + "id": 19, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "targetBlank": true, + "title": "Why monitor SYN retransmits?", + "url": "https://github.com/prometheus/node_exporter/issues/1023#issuecomment-408128365" + } + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(rate(node_netstat_TcpExt_TCPSynRetrans{cluster=\"$cluster\"}[$interval:$resolution]) / rate(node_netstat_Tcp_RetransSegs{cluster=\"$cluster\"}[$interval:$resolution])) by (instance))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of TCP SYN Retransmits out of all retransmits", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 26, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Observatorium", + "value": "Observatorium" + }, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(node_cpu_seconds_total, cluster)", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(node_cpu_seconds_total, cluster)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, 
+ { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Resolution", + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + } + ], + "query": "1m,5m,1h", + "queryValue": "", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "4h", + "value": "4h" + }, + "datasource": "$datasource", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "interval", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Networking - Cluster", + "uid": "ff635a025bcfea7bc3dd4f508990a3e8", + "version": 0 + } +kind: ConfigMap +metadata: + name: networking-cluster + labels: + grafana-custom-dashboard: "true" diff --git a/loaders/dashboards/pkg/controller/dashboard_controller.go b/loaders/dashboards/pkg/controller/dashboard_controller.go new file mode 100644 index 000000000..9f7c222ff --- /dev/null +++ b/loaders/dashboards/pkg/controller/dashboard_controller.go @@ -0,0 +1,381 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package controller + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "regexp" + "strconv" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/loaders/dashboards/pkg/util" +) + +const ( + unmarshallErrMsg = "Failed to unmarshall response body" + customFolderKey = "observability.open-cluster-management.io/dashboard-folder" + generalFolderKey = "general-folder" + defaultCustomFolder = "Custom" + homeDashboardTitle = "ACM - Clusters Overview" +) + +// DashboardLoader ... +type DashboardLoader struct { + coreClient corev1client.CoreV1Interface + informer cache.SharedIndexInformer +} + +var ( + grafanaURI = "http://127.0.0.1:3001" + //retry on errors + retry = 10 +) + +// RunGrafanaDashboardController ... 
+func RunGrafanaDashboardController(stop <-chan struct{}) { + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + klog.Error("Failed to get cluster config", "error", err) + } + // Build kubeclient client and informer for managed cluster + kubeClient, err := kubernetes.NewForConfig(config) + if err != nil { + klog.Fatal("Failed to build kubeclient", "error", err) + } + + go newKubeInformer(kubeClient.CoreV1()).Run(stop) + <-stop +} + +func isDesiredDashboardConfigmap(obj interface{}) bool { + cm, ok := obj.(*corev1.ConfigMap) + if !ok || cm == nil { + return false + } + + labels := cm.ObjectMeta.Labels + if strings.ToLower(labels["grafana-custom-dashboard"]) == "true" { + return true + } + + owners := cm.GetOwnerReferences() + for _, owner := range owners { + if strings.Contains(cm.Name, "grafana-dashboard") && owner.Kind == "MultiClusterObservability" { + return true + } + } + + return false +} + +func newKubeInformer(coreClient corev1client.CoreV1Interface) cache.SharedIndexInformer { + // get watched namespace + watchedNS := os.Getenv("POD_NAMESPACE") + watchlist := &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + return coreClient.ConfigMaps(watchedNS).List(context.TODO(), metav1.ListOptions{}) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + return coreClient.ConfigMaps(watchedNS).Watch(context.TODO(), metav1.ListOptions{}) + }, + } + kubeInformer := cache.NewSharedIndexInformer( + watchlist, + &corev1.ConfigMap{}, + time.Second*0, + cache.Indexers{}, + ) + + kubeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if !isDesiredDashboardConfigmap(obj) { + return + } + klog.Infof("detect there is a new dashboard %v created", obj.(*corev1.ConfigMap).Name) + updateDashboard(nil, obj, false) + }, + UpdateFunc: func(old, new interface{}) { + if old.(*corev1.ConfigMap).ObjectMeta.ResourceVersion == new.(*corev1.ConfigMap).ObjectMeta.ResourceVersion { + return + } + if !isDesiredDashboardConfigmap(new) { + return + } + klog.Infof("detect there is a dashboard %v updated", new.(*corev1.ConfigMap).Name) + updateDashboard(old, new, false) + }, + DeleteFunc: func(obj interface{}) { + if !isDesiredDashboardConfigmap(obj) { + return + } + klog.Infof("detect there is a dashboard %v deleted", obj.(*corev1.ConfigMap).Name) + deleteDashboard(obj) + }, + }) + + return kubeInformer +} + +func hasCustomFolder(folderTitle string) float64 { + grafanaURL := grafanaURI + "/api/folders" + body, _ := util.SetRequest("GET", grafanaURL, nil, retry) + + folders := []map[string]interface{}{} + err := json.Unmarshal(body, &folders) + if err != nil { + klog.Error(unmarshallErrMsg, "error", err) + return 0 + } + + for _, folder := range folders { + if folder["title"] == folderTitle { + return folder["id"].(float64) + } + } + return 0 +} + +func createCustomFolder(folderTitle string) float64 { + folderID := hasCustomFolder(folderTitle) + if folderID == 0 { + grafanaURL := grafanaURI + "/api/folders" + body, _ := util.SetRequest("POST", grafanaURL, strings.NewReader("{\"title\":\""+folderTitle+"\"}"), retry) + folder := map[string]interface{}{} + err := json.Unmarshal(body, &folder) + if err != nil { + klog.Error(unmarshallErrMsg, "error", err) + return 0 + } + return folder["id"].(float64) + } + return folderID +} + +func getCustomFolderUID(folderID float64) string { + grafanaURL := grafanaURI + "/api/folders/id/" + fmt.Sprint(folderID) + body, _ := util.SetRequest("GET", grafanaURL, nil, 
retry) + folder := map[string]interface{}{} + err := json.Unmarshal(body, &folder) + if err != nil { + klog.Error(unmarshallErrMsg, "error", err) + return "" + } + uid, ok := folder["uid"] + if ok { + return uid.(string) + } + + return "" +} + +func isEmptyFolder(folderID float64) bool { + if folderID == 0 { + return false + } + + grafanaURL := grafanaURI + "/api/search?folderIds=" + fmt.Sprint(folderID) + body, _ := util.SetRequest("GET", grafanaURL, nil, retry) + dashboards := []map[string]interface{}{} + err := json.Unmarshal(body, &dashboards) + if err != nil { + klog.Error(unmarshallErrMsg, "error", err) + return false + } + + if len(dashboards) == 0 { + klog.Infof("folder %v is empty", folderID) + return true + } + + return false +} + +func deleteCustomFolder(folderID float64) bool { + if folderID == 0 { + return false + } + + uid := getCustomFolderUID(folderID) + if uid == "" { + klog.Error("Failed to get custom folder UID") + return false + } + + grafanaURL := grafanaURI + "/api/folders/" + uid + _, respStatusCode := util.SetRequest("DELETE", grafanaURL, nil, retry) + if respStatusCode != http.StatusOK { + klog.Errorf("failed to delete custom folder %v with %v", folderID, respStatusCode) + return false + } + + klog.Infof("custom folder %v deleted", folderID) + return true +} + +func getDashboardCustomFolderTitle(obj interface{}) string { + cm, ok := obj.(*corev1.ConfigMap) + if !ok || cm == nil { + return "" + } + + labels := cm.ObjectMeta.Labels + if labels[generalFolderKey] == "" || strings.ToLower(labels[generalFolderKey]) != "true" { + annotations := cm.ObjectMeta.Annotations + customFolder, ok := annotations[customFolderKey] + if !ok || customFolder == "" { + customFolder = defaultCustomFolder + } + return customFolder + } + return "" +} + +// updateDashboard is used to update the customized dashboards via calling grafana api +func updateDashboard(old, new interface{}, overwrite bool) { + folderID := 0.0 + folderTitle := getDashboardCustomFolderTitle(new) + if folderTitle != "" { + folderID = createCustomFolder(folderTitle) + if folderID == 0 { + klog.Error("Failed to get custom folder id") + return + } + } + + for _, value := range new.(*corev1.ConfigMap).Data { + + dashboard := map[string]interface{}{} + err := json.Unmarshal([]byte(value), &dashboard) + if err != nil { + klog.Error("Failed to unmarshall data", "error", err) + return + } + if dashboard["uid"] == nil { + dashboard["uid"], _ = util.GenerateUID(new.(*corev1.ConfigMap).GetName(), + new.(*corev1.ConfigMap).GetNamespace()) + } + dashboard["id"] = nil + data := map[string]interface{}{ + "folderId": folderID, + "overwrite": overwrite, + "dashboard": dashboard, + } + + b, err := json.Marshal(data) + if err != nil { + klog.Error("failed to marshal body", "error", err) + return + } + + grafanaURL := grafanaURI + "/api/dashboards/db" + body, respStatusCode := util.SetRequest("POST", grafanaURL, bytes.NewBuffer(b), retry) + + if respStatusCode != http.StatusOK { + if respStatusCode == http.StatusPreconditionFailed { + if strings.Contains(string(body), "version-mismatch") { + updateDashboard(nil, new, true) + } else if strings.Contains(string(body), "name-exists") { + klog.Info("the dashboard name already existed") + } else { + klog.Infof("failed to create/update: %v", respStatusCode) + } + } else { + klog.Infof("failed to create/update: %v", respStatusCode) + } + } else { + if dashboard["title"] == homeDashboardTitle { + // get "id" value from response + re := regexp.MustCompile("\"id\":(\\d+),") + result := 
re.FindSubmatch(body) + if len(result) != 2 { + klog.Infof("failed to retrieve dashboard id") + } else { + id, err := strconv.Atoi(strings.Trim(string(result[1]), " ")) + if err != nil { + klog.Error(err, "failed to parse dashboard id") + } else { + setHomeDashboard(id) + } + } + } + klog.Info("Dashboard created/updated") + } + } + + folderTitle = getDashboardCustomFolderTitle(old) + folderID = hasCustomFolder(folderTitle) + if isEmptyFolder(folderID) { + deleteCustomFolder(folderID) + } +} + +// DeleteDashboard ... +func deleteDashboard(obj interface{}) { + for _, value := range obj.(*corev1.ConfigMap).Data { + + dashboard := map[string]interface{}{} + err := json.Unmarshal([]byte(value), &dashboard) + if err != nil { + klog.Error("Failed to unmarshall data", "error", err) + return + } + + uid, _ := util.GenerateUID(obj.(*corev1.ConfigMap).Name, obj.(*corev1.ConfigMap).Namespace) + if dashboard["uid"] != nil { + uid = dashboard["uid"].(string) + } + + grafanaURL := grafanaURI + "/api/dashboards/uid/" + uid + + _, respStatusCode := util.SetRequest("DELETE", grafanaURL, nil, retry) + if respStatusCode != http.StatusOK { + klog.Errorf("failed to delete dashboard %v with %v", obj.(*corev1.ConfigMap).Name, respStatusCode) + } else { + klog.Info("Dashboard deleted") + } + + folderTitle := getDashboardCustomFolderTitle(obj) + folderID := hasCustomFolder(folderTitle) + if isEmptyFolder(folderID) { + deleteCustomFolder(folderID) + } + } + return +} + +func setHomeDashboard(id int) { + data := map[string]int{ + "homeDashboardId": id, + } + + b, err := json.Marshal(data) + if err != nil { + klog.Error("failed to marshal body", "error", err) + return + } + grafanaURL := grafanaURI + "/api/org/preferences" + _, respStatusCode := util.SetRequest("PUT", grafanaURL, bytes.NewBuffer(b), retry) + + if respStatusCode != http.StatusOK { + klog.Infof("failed to set home dashboard: %v", respStatusCode) + } else { + klog.Info("Home dashboard is set") + } +} diff --git a/loaders/dashboards/pkg/controller/dashboard_controller_test.go b/loaders/dashboards/pkg/controller/dashboard_controller_test.go new file mode 100644 index 000000000..a23886263 --- /dev/null +++ b/loaders/dashboards/pkg/controller/dashboard_controller_test.go @@ -0,0 +1,391 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package controller + +import ( + "context" + "io/ioutil" + "net/http" + "os" + "testing" + "time" + + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +var ( + hasFakeServer bool = false +) + +func createDashboard() (*corev1.ConfigMap, error) { + // read the whole file at once + data, err := ioutil.ReadFile("../../examples/k8s-dashboard.yaml") + if err != nil { + panic(err) + } + + var cm corev1.ConfigMap + err = yaml.Unmarshal(data, &cm) + return &cm, err +} + +func createFakeServer(t *testing.T) { + hasFakeServer = true + server3001 := http.NewServeMux() + server3001.HandleFunc("/api/folders", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("[{\"id\": 1,\"uid\": \"test\",\"title\": \"Custom\"}, {\"id\": 2, \"title\": \"noServer\",\"uid\": \"noServer\"}, {\"id\": 3,\"title\": \"noUID\"}]")) + }, + ) + + server3001.HandleFunc("/api/folders/id/1", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("{\"uid\": \"test\"}")) + }, + ) + + server3001.HandleFunc("/api/folders/id/2", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("{\"uid\": \"noServer\"}")) + }, + ) + + server3001.HandleFunc("/api/folders/id/3", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("{}")) + }, + ) + + server3001.HandleFunc("/api/folders/test", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("{}")) + }, + ) + + server3001.HandleFunc("/api/search", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("[]")) + }, + ) + + server3001.HandleFunc("/api/dashboards/db", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("done")) + }, + ) + + server3001.HandleFunc("/api/dashboards/uid/ff635a025bcfea7bc3dd4f508990a3e8", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("done")) + }, + ) + + err := http.ListenAndServe(":3001", server3001) + if err != nil { + t.Fatal("fail to create internal server at 3001") + } +} + +func TestGrafanaDashboardController(t *testing.T) { + + coreClient := fake.NewSimpleClientset().CoreV1() + stop := make(chan struct{}) + + go createFakeServer(t) + retry = 1 + + os.Setenv("POD_NAMESPACE", "ns2") + + informer := newKubeInformer(coreClient) + go informer.Run(stop) + + cm, err := createDashboard() + if err == nil { + _, err := coreClient.ConfigMaps("ns2").Create(context.TODO(), cm, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("fail to create configmap with %v", err) + } + // wait for 2 second to trigger AddFunc of informer + time.Sleep(time.Second * 2) + updateDashboard(nil, cm, false) + + cm.Data = map[string]string{} + _, err = coreClient.ConfigMaps("ns2").Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("fail to update configmap with %v", err) + } + // wait for 2 second to trigger UpdateFunc of informer + time.Sleep(time.Second * 2) + updateDashboard(nil, cm, false) + + cm, _ := createDashboard() + _, err = coreClient.ConfigMaps("ns2").Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("fail to update configmap with %v", err) + } + + // wait for 2 second to trigger UpdateFunc of informer + time.Sleep(time.Second * 2) + updateDashboard(nil, cm, false) + + coreClient.ConfigMaps("ns2").Delete(context.TODO(), cm.GetName(), metav1.DeleteOptions{}) + time.Sleep(time.Second * 2) + deleteDashboard(cm) + + } + + close(stop) + <-stop +} + +func 
TestIsDesiredDashboardConfigmap(t *testing.T) { + os.Setenv("POD_NAMESPACE", "test") + testCaseList := []struct { + name string + cm *corev1.ConfigMap + expected bool + }{ + + { + "invalid cm", + nil, + false, + }, + + { + "valid label", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + Labels: map[string]string{"grafana-custom-dashboard": "true"}, + }, + }, + true, + }, + + { + "valid name", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "grafana-dashboard", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + {Kind: "MultiClusterObservability"}, + }, + }, + }, + true, + }, + + { + "invalid label", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + Labels: map[string]string{"grafana-custom-dashboard": "false"}, + }, + }, + false, + }, + + { + "invalid name", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + {Kind: "MultiClusterObservability"}, + }, + }, + }, + false, + }, + + { + "invalid owner references", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + OwnerReferences: []metav1.OwnerReference{ + {Kind: "test"}, + }, + }, + }, + false, + }, + } + + for _, c := range testCaseList { + output := isDesiredDashboardConfigmap(c.cm) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestGetCustomFolderUID(t *testing.T) { + if !hasFakeServer { + go createFakeServer(t) + retry = 1 + } + + testCaseList := []struct { + name string + id float64 + expected string + }{ + + { + "valid folder", + 1, + "test", + }, + { + "invalid folder", + 0, + "", + }, + { + "no uid field", + 3, + "", + }, + } + for _, c := range testCaseList { + output := getCustomFolderUID(c.id) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestIsEmptyFolder(t *testing.T) { + if !hasFakeServer { + go createFakeServer(t) + retry = 1 + } + + testCaseList := []struct { + name string + folderID float64 + expected bool + }{ + + { + "invalid ID", + 0, + false, + }, + + { + "empty folder", + 1, + true, + }, + } + + for _, c := range testCaseList { + output := isEmptyFolder(c.folderID) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestGetDashboardCustomFolderTitle(t *testing.T) { + testCaseList := []struct { + name string + cm *corev1.ConfigMap + expected string + }{ + + { + "invalid cm", + nil, + "", + }, + + { + "default folder", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "grafana-dashboard", + Namespace: "test", + }, + }, + "Custom", + }, + + { + "general folder", + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "grafana-dashboard", + Namespace: "test", + Labels: map[string]string{"general-folder": "true"}, + }, + }, + "", + }, + } + + for _, c := range testCaseList { + output := getDashboardCustomFolderTitle(c.cm) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestDeleteCustomFolder(t *testing.T) { + if !hasFakeServer { + go createFakeServer(t) + retry = 1 + } + + testCaseList := []struct { + name string + folderID float64 + expected bool + }{ + + { + "invalid ID", + 0, + false, + }, + + { + "no UID", + 3, + 
false, + }, + + { + "request error", + 2, + false, + }, + + { + "valid name", + 1, + true, + }, + } + + for _, c := range testCaseList { + output := deleteCustomFolder(c.folderID) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} diff --git a/loaders/dashboards/pkg/util/grafana_util.go b/loaders/dashboards/pkg/util/grafana_util.go new file mode 100644 index 000000000..5b6367365 --- /dev/null +++ b/loaders/dashboards/pkg/util/grafana_util.go @@ -0,0 +1,74 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "encoding/hex" + "hash/fnv" + "io" + "io/ioutil" + "net/http" + "time" + + "k8s.io/klog" +) + +const ( + defaultAdmin = "WHAT_YOU_ARE_DOING_IS_VOIDING_SUPPORT_0000000000000000000000000000000000000000000000000000000000000000" +) + +// GenerateUID generates UID for customized dashboard +func GenerateUID(namespace string, name string) (string, error) { + uid := namespace + "-" + name + if len(uid) > 40 { + hasher := fnv.New128a() + _, err := hasher.Write([]byte(uid)) + if err != nil { + return "", err + } + uid = hex.EncodeToString(hasher.Sum(nil)) + } + return uid, nil +} + +// GetHTTPClient returns http client +func getHTTPClient() *http.Client { + transport := &http.Transport{} + client := &http.Client{Transport: transport} + return client +} + +// SetRequest ... +func SetRequest(method string, url string, body io.Reader, retry int) ([]byte, int) { + req, _ := http.NewRequest(method, url, body) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-Forwarded-User", defaultAdmin) + + resp, err := getHTTPClient().Do(req) + times := 0 + for { + if err == nil { + break + } + klog.Error("failed to send HTTP request. Retry in 5 seconds ", "error ", err) + time.Sleep(time.Second * 5) + times++ + if times == retry { + klog.Errorf("failed to send HTTP request after retrying %v times", retry) + break + } + resp, err = getHTTPClient().Do(req) + } + + if resp != nil { + defer resp.Body.Close() + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + klog.Info("failed to parse response body ", "error ", err) + } + return respBody, resp.StatusCode + } else { + return nil, http.StatusNotFound + } +} diff --git a/loaders/dashboards/pkg/util/grafana_util_test.go b/loaders/dashboards/pkg/util/grafana_util_test.go new file mode 100644 index 000000000..2bc9138dc --- /dev/null +++ b/loaders/dashboards/pkg/util/grafana_util_test.go @@ -0,0 +1,46 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "net/http" + "testing" + "time" +) + +func TestGenerateUID(t *testing.T) { + + uid, _ := GenerateUID("open-cluster-management", "test") + if uid != "open-cluster-management-test" { + t.Fatalf("the uid %v is not the expected %v", uid, "open-cluster-management-test") + } + + uid, _ = GenerateUID("open-cluster-management-observability", "test") + if uid != "4e20548bdba37201faabf30d1c419981" { + t.Fatalf("the uid %v should not equal to %v", uid, "4e20548bdba37201faabf30d1c419981") + } + +} + +func createFakeServer(t *testing.T) { + server3002 := http.NewServeMux() + server3002.HandleFunc("/", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("done")) + }, + ) + err := http.ListenAndServe(":3002", server3002) + if err != nil { + t.Fatal("fail to create internal server at 3002") + } +} + +func TestSetRequest(t *testing.T) { + go createFakeServer(t) + time.Sleep(time.Second) + _, responseCode := SetRequest("GET", "http://127.0.0.1:3002", nil, 1) + if responseCode == http.StatusNotFound { + t.Fatalf("cannot send request to server: %v", responseCode) + } +} diff --git a/operators/endpointmetrics/Dockerfile b/operators/endpointmetrics/Dockerfile new file mode 100644 index 000000000..37348cd4a --- /dev/null +++ b/operators/endpointmetrics/Dockerfile @@ -0,0 +1,58 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project. +FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder + +WORKDIR /workspace +COPY go.sum go.mod ./ +COPY ./operators/endpointmetrics ./operators/endpointmetrics +COPY ./operators/multiclusterobservability/api ./operators/multiclusterobservability/api +COPY ./operators/pkg ./operators/pkg + +RUN CGO_ENABLED=0 go build -a -installsuffix cgo -o build/_output/bin/endpoint-monitoring-operator operators/endpointmetrics/main.go + +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +ARG VCS_REF +ARG VCS_URL +ARG IMAGE_NAME +ARG IMAGE_DESCRIPTION +ARG IMAGE_DISPLAY_NAME +ARG IMAGE_NAME_ARCH +ARG IMAGE_MAINTAINER +ARG IMAGE_VENDOR +ARG IMAGE_VERSION +ARG IMAGE_RELEASE +ARG IMAGE_SUMMARY +ARG IMAGE_OPENSHIFT_TAGS + +LABEL org.label-schema.vendor="Red Hat" \ + org.label-schema.name="$IMAGE_NAME_ARCH" \ + org.label-schema.description="$IMAGE_DESCRIPTION" \ + org.label-schema.vcs-ref=$VCS_REF \ + org.label-schema.vcs-url=$VCS_URL \ + org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \ + org.label-schema.schema-version="1.0" \ + name="$IMAGE_NAME" \ + maintainer="$IMAGE_MAINTAINER" \ + vendor="$IMAGE_VENDOR" \ + version="$IMAGE_VERSION" \ + release="$IMAGE_RELEASE" \ + description="$IMAGE_DESCRIPTION" \ + summary="$IMAGE_SUMMARY" \ + io.k8s.display-name="$IMAGE_DISPLAY_NAME" \ + io.k8s.description="$IMAGE_DESCRIPTION" \ + io.openshift.tags="$IMAGE_OPENSHIFT_TAGS" + +ENV OPERATOR=/usr/local/bin/endpoint-monitoring-operator \ + USER_UID=1001 \ + USER_NAME=endpoint-monitoring-operator + +RUN microdnf update -y && microdnf clean all + +COPY ./operators/endpointmetrics/manifests /usr/local/manifests + +# install operator binary +COPY --from=builder /workspace/build/_output/bin/endpoint-monitoring-operator ${OPERATOR} +USER ${USER_UID} + +ENTRYPOINT ["/usr/local/bin/endpoint-monitoring-operator"] diff --git a/operators/endpointmetrics/OWNERS b/operators/endpointmetrics/OWNERS new file mode 100644 index 000000000..7db34db0b --- /dev/null +++ b/operators/endpointmetrics/OWNERS @@ -0,0 +1,9 
@@
+approvers:
+- marcolan018
+- morvencao
+- haoqing0110
+- songleo
+
+reviewers:
+- clyang82
+- bjoydeep
diff --git a/operators/endpointmetrics/README.md b/operators/endpointmetrics/README.md
new file mode 100644
index 000000000..7b9183709
--- /dev/null
+++ b/operators/endpointmetrics/README.md
@@ -0,0 +1,140 @@
+
+# endpoint-monitoring-operator
+
+## Overview
+
+The endpoint-monitoring-operator is a component of the ACM observability feature. It is designed to be installed on a spoke cluster.
+
+
+## Developer Guide
+This guide is for developers who want to build and install the endpoint-monitoring-operator. It can run in [kind][install_kind] if you don't have an OCP environment.
+
+### Prerequisites
+
+- git
+- go version v1.15+
+- docker version 17.03+
+- kubectl version v1.16.3+
+- kustomize version v3.8.5+
+- operator-sdk version v1.4.2+
+- access to a Kubernetes v1.16.0+ cluster
+
+### Build the Operator
+
+1. Check out the endpoint-metrics-operator repository.
+
+```
+$ git clone git@github.com:stolostron/endpoint-metrics-operator.git
+```
+
+2. Build the endpoint-metrics-operator image and push it to a public registry, such as quay.io:
+
+```
+$ make -f Makefile.prow docker-build docker-push IMG=quay.io//endpoint-metrics-operator:latest
+```
+
+### Deploy this Operator
+
+1. Create the `open-cluster-management-addon-observability` namespace if it doesn't exist:
+
+```
+$ kubectl create ns open-cluster-management-addon-observability
+```
+
+2. Create the secret named `hub-kube-config` in the `open-cluster-management-addon-observability` namespace that holds the kubeconfig of the hub cluster:
+
+```
+$ cat << EOF | kubectl apply -n open-cluster-management-addon-observability -f -
+kind: Secret
+apiVersion: v1
+metadata:
+  name: hub-kube-config
+type: Opaque
+data:
+  kubeconfig: ***
+EOF
+```
+
+> Note: the value of `kubeconfig` is the base64-encoded kubeconfig of the hub cluster; see the encoding example after these steps.
+
+3. Create the secret named `hub-info-secret` in the `open-cluster-management-addon-observability` namespace with the hub cluster information:
+
+```
+$ cat << EOF | kubectl apply -n open-cluster-management-addon-observability -f -
+kind: Secret
+apiVersion: v1
+metadata:
+  name: hub-info-secret
+type: Opaque
+data:
+  clusterName: ***
+  hub-info.yaml: ***
+EOF
+```
+
+> Note: the value of `clusterName` is the base64-encoded hub cluster name, while the value of `hub-info.yaml` is base64-encoded yaml that contains the observatorium API gateway URL, the hub alertmanager URL and the hub router CA exposed on the hub cluster. The original yaml content resembles the following:
+
+```yaml
+endpoint: "http://observatorium-api-open-cluster-management-observability.apps.stage3.demo.red-chesterfield.com/api/v1/receive"
+hub-alertmanager-endpoint: "https://alertmanager-open-cluster-management-observability.apps.stage3.demo.red-chesterfield.com"
+hub-alertmanager-router-ca: |
+  -----BEGIN CERTIFICATE-----
+  xxxxxxxxxxxxxxxxxxxxxxxxxxx
+  -----END CERTIFICATE-----
+```
+
+4. Create the configmap named `observability-metrics-allowlist` in the `open-cluster-management-addon-observability` namespace:
+
+```
+$ kubectl apply -n open-cluster-management-addon-observability -f https://raw.githubusercontent.com/open-cluster-management/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml
+```
+
+5. Update the value of the `COLLECTOR_IMAGE` environment variable in the endpoint-metrics-operator deployment, for example: `quay.io/stolostron/metrics-collector:2.3.0-SNAPSHOT-2021-04-08-09-07-10`
+
+```
+$ sed -i 's~REPLACE_WITH_METRICS_COLLECTOR_IMAGE~quay.io/stolostron/metrics-collector:2.3.0-SNAPSHOT-2021-04-08-09-07-10~g' config/manager/manager.yaml
+```
+
+6. Update the value of the `HUB_NAMESPACE` environment variable with the actual hub namespace, for example: `cluster1`
+
+```
+$ sed -i 's~REPLACE_WITH_HUB_NAMESPACE~cluster1~g' config/manager/manager.yaml
+```
+
+7. Replace the operator image and deploy the endpoint-metrics-operator:
+
+```
+$ make -f Makefile.prow deploy IMG=quay.io//endpoint-metrics-operator:latest
+```
+
+8. Deploy the endpoint-metrics-operator CR:
+
+```
+$ kubectl -n open-cluster-management-addon-observability apply -f config/samples/observability.open-cluster-management.io_v1beta1_observabilityaddon.yaml
+```
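+
+For reference, one possible way to produce the base64-encoded values used in steps 2 and 3 is shown below. The file names `hub-kubeconfig` and `hub-info.yaml` and the cluster name `cluster1` are only examples; substitute the values from your environment, and omit `-w0` if your `base64` does not support it:
+
+```
+# value for the kubeconfig key in the hub-kube-config secret
+$ base64 -w0 hub-kubeconfig
+# value for the clusterName key in hub-info-secret
+$ echo -n "cluster1" | base64
+# value for the hub-info.yaml key in hub-info-secret
+$ base64 -w0 hub-info.yaml
+```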
+
+### Verify the Installation
+
+After a successful installation, you will see the following pods running:
+
+```
+# kubectl -n open-cluster-management-addon-observability get pod
+NAME                                               READY   STATUS    RESTARTS   AGE
+endpoint-observability-operator-7cf545f45c-cfjlk   1/1     Running   0          136m
+metrics-collector-deployment-6dc9998cb-f2wd7       1/1     Running   0          136m
+```
+
+You should also see the CR created in the cluster:
+
+```
+# kubectl -n open-cluster-management-addon-observability get observabilityaddon
+NAME                  AGE
+observability-addon   137m
+```
+
+**Notice**: Deploy the `observabilityaddon` CR in the local managed cluster only for dev/test purposes. In a real topology, the `observabilityaddon` CR is created in the hub cluster; the endpoint-monitoring-operator talks to the API server of the hub cluster to watch those CRs and then applies the changes on the managed cluster.
+
+### View metrics in dashboard
+
+Access the Grafana console in the hub cluster at https://{YOUR_DOMAIN}/grafana and view the metrics in the dashboard named "ACM:Managed Cluster Monitoring".
+
diff --git a/operators/endpointmetrics/config/certmanager/certificate.yaml b/operators/endpointmetrics/config/certmanager/certificate.yaml
new file mode 100644
index 000000000..52d866183
--- /dev/null
+++ b/operators/endpointmetrics/config/certmanager/certificate.yaml
@@ -0,0 +1,25 @@
+# The following manifests contain a self-signed issuer CR and a certificate CR.
+# More documentation can be found at https://docs.cert-manager.io
+# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
+apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/operators/endpointmetrics/config/certmanager/kustomization.yaml b/operators/endpointmetrics/config/certmanager/kustomization.yaml new file mode 100644 index 000000000..bebea5a59 --- /dev/null +++ b/operators/endpointmetrics/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/operators/endpointmetrics/config/certmanager/kustomizeconfig.yaml b/operators/endpointmetrics/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 000000000..90d7c313c --- /dev/null +++ b/operators/endpointmetrics/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,16 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: +- kind: Certificate + group: cert-manager.io + path: spec/commonName +- kind: Certificate + group: cert-manager.io + path: spec/dnsNames diff --git a/operators/endpointmetrics/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml b/operators/endpointmetrics/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml new file mode 100644 index 000000000..047213f1d --- /dev/null +++ b/operators/endpointmetrics/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml @@ -0,0 +1,94 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: observabilityaddons.observability.open-cluster-management.io +spec: + group: observability.open-cluster-management.io + names: + kind: ObservabilityAddon + listKind: ObservabilityAddonList + plural: observabilityaddons + shortNames: + - oba + singular: observabilityaddon + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ObservabilityAddon is the Schema for the observabilityaddon API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservabilityAddonSpec is the spec of observability addon + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push + metrics to hub server. + type: boolean + interval: + default: 30 + description: Interval for the observability addon push metrics to + hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + type: object + status: + description: ObservabilityAddonStatus defines the observed state of ObservabilityAddon + properties: + conditions: + items: + description: StatusCondition contains condition information for + an observability addon + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/config/crd/kustomization.yaml b/operators/endpointmetrics/config/crd/kustomization.yaml new file mode 100644 index 000000000..0e122d158 --- /dev/null +++ b/operators/endpointmetrics/config/crd/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- bases/observability.open-cluster-management.io_observabilityaddons.yaml diff --git a/operators/endpointmetrics/config/default/kustomization.yaml b/operators/endpointmetrics/config/default/kustomization.yaml new file mode 100644 index 000000000..e74f1e985 --- /dev/null +++ b/operators/endpointmetrics/config/default/kustomization.yaml @@ -0,0 +1,18 @@ +# Adds namespace to all resources. +namespace: open-cluster-management-addon-observability + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +# namePrefix: endpoint-metrics-operator- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +bases: +- ../crd +- ../rbac +- ../manager diff --git a/operators/endpointmetrics/config/manager/kustomization.yaml b/operators/endpointmetrics/config/manager/kustomization.yaml new file mode 100644 index 000000000..5c5f0b84c --- /dev/null +++ b/operators/endpointmetrics/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/operators/endpointmetrics/config/manager/manager.yaml b/operators/endpointmetrics/config/manager/manager.yaml new file mode 100644 index 000000000..449b34924 --- /dev/null +++ b/operators/endpointmetrics/config/manager/manager.yaml @@ -0,0 +1,58 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: endpoint-observability-operator + namespace: open-cluster-management-addon-observability +spec: + replicas: 1 + selector: + matchLabels: + name: endpoint-observability-operator + template: + metadata: + labels: + name: endpoint-observability-operator + spec: + serviceAccountName: endpoint-observability-operator + containers: + - name: endpoint-observability-operator + image: quay.io/stolostron/endpoint-metrics-operator:latest + imagePullPolicy: Always + command: + - endpoint-monitoring-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPERATOR_NAME + value: "endpoint-monitoring-operator" + - name: COLLECTOR_IMAGE + value: REPLACE_WITH_METRICS_COLLECTOR_IMAGE + - name: HUB_KUBECONFIG + value: /spoke/hub-kubeconfig/kubeconfig + - name: HUB_NAMESPACE + value: REPLACE_WITH_HUB_NAMESPACE + volumeMounts: + - mountPath: /spoke/hub-kubeconfig + name: hub-kubeconfig-secret + readOnly: true + volumes: + - name: hub-kubeconfig-secret + secret: + defaultMode: 420 + secretName: hub-kube-config diff --git a/operators/endpointmetrics/config/rbac/emo_role.yaml b/operators/endpointmetrics/config/rbac/emo_role.yaml new file mode 100644 index 000000000..8e6d548ad --- /dev/null +++ b/operators/endpointmetrics/config/rbac/emo_role.yaml @@ -0,0 +1,119 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: endpoint-observability-operator +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - secrets + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get +- apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - observability.open-cluster-management.io + resources: + - multiclusterobservabilities + - observabilityaddons + verbs: + - list + - watch + - get +- apiGroups: + - observability.open-cluster-management.io + resources: + - observabilityaddons/status + verbs: + - get + - update +- apiGroups: + - config.openshift.io + resources: + - clusterversions + verbs: + - get +- apiGroups: + - work.open-cluster-management.io + resources: + - appliedmanifestworks + verbs: + - get +- apiGroups: + - work.open-cluster-management.io + resources: + - appliedmanifestworks/finalizers + verbs: + - update +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - delete + - get + - list + - watch + - create + - update + - patch diff --git a/operators/endpointmetrics/config/rbac/emo_rolebinding.yaml b/operators/endpointmetrics/config/rbac/emo_rolebinding.yaml new file mode 100644 index 000000000..c6282dff1 --- /dev/null +++ 
b/operators/endpointmetrics/config/rbac/emo_rolebinding.yaml @@ -0,0 +1,14 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: endpoint-observability-operator +roleRef: + kind: ClusterRole + name: endpoint-observability-operator + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: endpoint-observability-operator + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/config/rbac/emo_serviceaccount.yaml b/operators/endpointmetrics/config/rbac/emo_serviceaccount.yaml new file mode 100644 index 000000000..6e9bd171e --- /dev/null +++ b/operators/endpointmetrics/config/rbac/emo_serviceaccount.yaml @@ -0,0 +1,7 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: endpoint-observability-operator + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/config/rbac/kustomization.yaml b/operators/endpointmetrics/config/rbac/kustomization.yaml new file mode 100644 index 000000000..06f64afd7 --- /dev/null +++ b/operators/endpointmetrics/config/rbac/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- emo_role.yaml +- emo_rolebinding.yaml +- emo_serviceaccount.yaml diff --git a/operators/endpointmetrics/config/samples/kustomization.yaml b/operators/endpointmetrics/config/samples/kustomization.yaml new file mode 100644 index 000000000..5e80f4280 --- /dev/null +++ b/operators/endpointmetrics/config/samples/kustomization.yaml @@ -0,0 +1,4 @@ +## Append samples you want in your CSV to this file as resources ## +resources: +- observability.open-cluster-management.io_v1beta1_observabilityaddon.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/operators/endpointmetrics/config/samples/observability.open-cluster-management.io_v1beta1_observabilityaddon.yaml b/operators/endpointmetrics/config/samples/observability.open-cluster-management.io_v1beta1_observabilityaddon.yaml new file mode 100644 index 000000000..ad5104fec --- /dev/null +++ b/operators/endpointmetrics/config/samples/observability.open-cluster-management.io_v1beta1_observabilityaddon.yaml @@ -0,0 +1,7 @@ +apiVersion: observability.open-cluster-management.io/v1beta1 +kind: ObservabilityAddon +metadata: + name: observability-addon +spec: + enableMetrics: true + interval: 30 diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go new file mode 100644 index 000000000..c6ac211bf --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go @@ -0,0 +1,304 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
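+
+// Illustrative sketch only: for a hypothetical allowlist containing
+//   names:   [up]
+//   matches: [__name__="node_cpu_seconds_total"]
+//   renames: {old_metric: new_metric}
+// createDeployment below would assemble metrics-collector arguments roughly like
+//   --from=$(FROM) --to-upload=$(TO) --interval=30s --limit-bytes=1073741824
+//   --label="cluster=<cluster-name>" --label="clusterID=<cluster-id>"
+//   --match={__name__="up"} --match={__name__="node_cpu_seconds_total"}
+//   --rename="old_metric=new_metric"
+// The exact flag set also depends on the cluster type and on whether the
+// in-cluster OCP Prometheus or a deployed Prometheus is used as the source.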
+package observabilityendpoint + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/rendering" + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" +) + +const ( + metricsConfigMapKey = "metrics_list.yaml" + metricsCollectorName = "metrics-collector-deployment" + selectorKey = "component" + selectorValue = "metrics-collector" + caMounthPath = "/etc/serving-certs-ca-bundle" + caVolName = "serving-certs-ca-bundle" + mtlsCertName = "observability-controller-open-cluster-management.io-observability-signer-client-cert" + mtlsCaName = "observability-managed-cluster-certs" + limitBytes = 1073741824 + defaultInterval = "30s" +) + +const ( + restartLabel = "cert/time-restarted" +) + +var ( + ocpPromURL = "https://prometheus-k8s.openshift-monitoring.svc:9091" + promURL = "https://prometheus-k8s:9091" +) + +type MetricsAllowlist struct { + NameList []string `yaml:"names"` + MatchList []string `yaml:"matches"` + RenameMap map[string]string `yaml:"renames"` + RuleList []Rule `yaml:"rules"` +} + +// Rule is the struct for recording rules and alert rules +type Rule struct { + Record string `yaml:"record"` + Expr string `yaml:"expr"` +} + +func createDeployment(clusterID string, clusterType string, + obsAddonSpec oashared.ObservabilityAddonSpec, + hubInfo operatorconfig.HubInfo, allowlist MetricsAllowlist, + nodeSelector map[string]string, tolerations []corev1.Toleration, + replicaCount int32) *appsv1.Deployment { + interval := fmt.Sprint(obsAddonSpec.Interval) + "s" + if fmt.Sprint(obsAddonSpec.Interval) == "" { + interval = defaultInterval + } + + volumes := []corev1.Volume{ + { + Name: "mtlscerts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: mtlsCertName, + }, + }, + }, + { + Name: "mtlsca", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: mtlsCaName, + }, + }, + }, + } + mounts := []corev1.VolumeMount{ + { + Name: "mtlscerts", + MountPath: "/tlscerts/certs", + }, + { + Name: "mtlsca", + MountPath: "/tlscerts/ca", + }, + } + caFile := caMounthPath + "/service-ca.crt" + if clusterID == "" { + clusterID = hubInfo.ClusterName + // deprecated ca bundle, only used for ocp 3.11 env + caFile = "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt" + } else { + volumes = append(volumes, corev1.Volume{ + Name: caVolName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: caConfigmapName, + }, + }, + }, + }) + mounts = append(mounts, corev1.VolumeMount{ + Name: caVolName, + MountPath: caMounthPath, + }) + } + + commands := []string{ + "/usr/bin/metrics-collector", + "--from=$(FROM)", + "--to-upload=$(TO)", + "--interval=" + interval, + "--limit-bytes=" + strconv.Itoa(limitBytes), + fmt.Sprintf("--label=\"cluster=%s\"", hubInfo.ClusterName), + fmt.Sprintf("--label=\"clusterID=%s\"", clusterID), + } + commands = append(commands, "--from-token-file=/var/run/secrets/kubernetes.io/serviceaccount/token") + if 
!installPrometheus { + commands = append(commands, "--from-ca-file="+caFile) + } + if clusterType != "" { + commands = append(commands, fmt.Sprintf("--label=\"clusterType=%s\"", clusterType)) + } + for _, metrics := range allowlist.NameList { + commands = append(commands, fmt.Sprintf("--match={__name__=\"%s\"}", metrics)) + } + for _, match := range allowlist.MatchList { + commands = append(commands, fmt.Sprintf("--match={%s}", match)) + } + + renamekeys := make([]string, 0, len(allowlist.RenameMap)) + for k := range allowlist.RenameMap { + renamekeys = append(renamekeys, k) + } + sort.Strings(renamekeys) + for _, k := range renamekeys { + commands = append(commands, fmt.Sprintf("--rename=\"%s=%s\"", k, allowlist.RenameMap[k])) + } + for _, rule := range allowlist.RuleList { + commands = append(commands, fmt.Sprintf("--recordingrule={\"name\":\"%s\",\"query\":\"%s\"}", rule.Record, rule.Expr)) + } + from := promURL + if !installPrometheus { + from = ocpPromURL + } + metricsCollectorDep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: metricsCollectorName, + Namespace: namespace, + Annotations: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(replicaCount), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + selectorKey: selectorValue, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + selectorKey: selectorValue, + }, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: serviceAccountName, + Containers: []corev1.Container{ + { + Name: "metrics-collector", + Image: rendering.Images[operatorconfig.MetricsCollectorKey], + Command: commands, + Env: []corev1.EnvVar{ + { + Name: "FROM", + Value: from, + }, + { + Name: "TO", + Value: hubInfo.ObservatoriumAPIEndpoint, + }, + }, + VolumeMounts: mounts, + ImagePullPolicy: corev1.PullAlways, + }, + }, + Volumes: volumes, + NodeSelector: nodeSelector, + Tolerations: tolerations, + }, + }, + }, + } + if obsAddonSpec.Resources != nil { + metricsCollectorDep.Spec.Template.Spec.Containers[0].Resources = *obsAddonSpec.Resources + } + return metricsCollectorDep +} + +func updateMetricsCollector(ctx context.Context, client client.Client, obsAddonSpec oashared.ObservabilityAddonSpec, + hubInfo operatorconfig.HubInfo, clusterID string, clusterType string, + replicaCount int32, forceRestart bool) (bool, error) { + + list := getMetricsAllowlist(ctx, client) + endpointDeployment := getEndpointDeployment(ctx, client) + deployment := createDeployment(clusterID, clusterType, obsAddonSpec, hubInfo, list, + endpointDeployment.Spec.Template.Spec.NodeSelector, endpointDeployment.Spec.Template.Spec.Tolerations, replicaCount) + found := &appsv1.Deployment{} + err := client.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, found) + if err != nil { + if errors.IsNotFound(err) { + err = client.Create(ctx, deployment) + if err != nil { + log.Error(err, "Failed to create metrics-collector deployment") + return false, err + } + log.Info("Created metrics-collector deployment ") + } else { + log.Error(err, "Failed to check the metrics-collector deployment") + return false, err + } + } else { + if !reflect.DeepEqual(deployment.Spec.Template.Spec, found.Spec.Template.Spec) || + !reflect.DeepEqual(deployment.Spec.Replicas, found.Spec.Replicas) || + forceRestart { + deployment.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + if forceRestart && found.Status.ReadyReplicas != 0 { + 
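+				// Bumping this pod template label to the current timestamp changes the
+				// pod template, which rolls the collector pods; this is how a forced
+				// restart (for example after the mTLS certificates are rotated) is done.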
deployment.Spec.Template.ObjectMeta.Labels[restartLabel] = time.Now().Format("2006-1-2.1504") + } + err = client.Update(ctx, deployment) + if err != nil { + log.Error(err, "Failed to update metrics-collector deployment") + return false, err + } + log.Info("Updated metrics-collector deployment ") + } + } + return true, nil +} + +func deleteMetricsCollector(ctx context.Context, client client.Client) error { + found := &appsv1.Deployment{} + err := client.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, found) + if err != nil { + if errors.IsNotFound(err) { + log.Info("The metrics collector deployment does not exist") + return nil + } + log.Error(err, "Failed to check the metrics collector deployment") + return err + } + err = client.Delete(ctx, found) + if err != nil { + log.Error(err, "Failed to delete the metrics collector deployment") + return err + } + log.Info("metrics collector deployment deleted") + return nil +} + +func int32Ptr(i int32) *int32 { return &i } + +func getMetricsAllowlist(ctx context.Context, client client.Client) MetricsAllowlist { + l := &MetricsAllowlist{} + cm := &corev1.ConfigMap{} + err := client.Get(ctx, types.NamespacedName{Name: operatorconfig.AllowlistConfigMapName, + Namespace: namespace}, cm) + if err != nil { + log.Error(err, "Failed to get configmap") + } else { + if cm.Data != nil { + err = yaml.Unmarshal([]byte(cm.Data[metricsConfigMapKey]), l) + if err != nil { + log.Error(err, "Failed to unmarshal data in configmap") + } + } + } + return *l +} + +func getEndpointDeployment(ctx context.Context, client client.Client) appsv1.Deployment { + d := &appsv1.Deployment{} + err := client.Get(ctx, types.NamespacedName{Name: "endpoint-observability-operator", Namespace: namespace}, d) + if err != nil { + log.Error(err, "Failed to get deployment") + } + return *d +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go new file mode 100644 index 000000000..3a0b37961 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go @@ -0,0 +1,87 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
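+
+// The tests below drive createDeployment, updateMetricsCollector and
+// deleteMetricsCollector through a fake client; the allowlist ConfigMap
+// fixture uses the same metrics_list.yaml keys (names, matches, rules) that
+// getMetricsAllowlist parses.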
+package observabilityendpoint + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +func getAllowlistCM() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorconfig.AllowlistConfigMapName, + Namespace: namespace, + }, + Data: map[string]string{ + metricsConfigMapKey: ` +names: + - a + - b +matches: + - c +rules: + - record: f + expr: g +`}, + } +} + +func init() { + s := scheme.Scheme + addonv1alpha1.AddToScheme(s) + oav1beta1.AddToScheme(s) + + namespace = testNamespace + hubNamespace = testHubNamspace +} + +func TestMetricsCollector(t *testing.T) { + hubInfo := &operatorconfig.HubInfo{ + ClusterName: "test-cluster", + ObservatoriumAPIEndpoint: "http://test-endpoint", + } + allowlistCM := getAllowlistCM() + obsAddon := oashared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 60, + } + + ctx := context.TODO() + c := fake.NewFakeClient(allowlistCM) + // Default deployment with instance count 1 + _, err := updateMetricsCollector(ctx, c, obsAddon, *hubInfo, testClusterID, "", 1, false) + if err != nil { + t.Fatalf("Failed to create metrics collector deployment: (%v)", err) + } + // Update deployment to reduce instance count to zero + _, err = updateMetricsCollector(ctx, c, obsAddon, *hubInfo, testClusterID, "", 0, false) + if err != nil { + t.Fatalf("Failed to create metrics collector deployment: (%v)", err) + } + + _, err = updateMetricsCollector(ctx, c, obsAddon, *hubInfo, testClusterID+"-update", "SNO", 1, false) + if err != nil { + t.Fatalf("Failed to create metrics collector deployment: (%v)", err) + } + + _, err = updateMetricsCollector(ctx, c, obsAddon, *hubInfo, testClusterID+"-update", "SNO", 1, true) + if err != nil { + t.Fatalf("Failed to update metrics collector deployment: (%v)", err) + } + + err = deleteMetricsCollector(ctx, c) + if err != nil { + t.Fatalf("Failed to delete metrics collector deployment: (%v)", err) + } +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go new file mode 100644 index 000000000..acf4238b0 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go @@ -0,0 +1,354 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
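+
+// High-level reconcile flow implemented below: fetch the ObservabilityAddon
+// from the hub and from the local cluster, handle the cleanup finalizer, read
+// the hub connection details from the hub info secret, then either wire up the
+// OpenShift in-cluster Prometheus (CA ConfigMap, ClusterRoleBinding and
+// cluster-monitoring-config) or render a standalone Prometheus stack when
+// installPrometheus is set, and finally scale the metrics collector to 1 or 0
+// replicas depending on spec.enableMetrics, reporting the addon status
+// (Deployed, Disabled, Degraded or NotSupported) along the way.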
+package observabilityendpoint
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	"github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/rendering"
+	"github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/util"
+	oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1"
+	operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config"
+	"github.com/stolostron/multicluster-observability-operator/operators/pkg/deploying"
+	rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering"
+)
+
+var (
+	log                  = ctrl.Log.WithName("controllers").WithName("ObservabilityAddon")
+	installPrometheus, _ = strconv.ParseBool(os.Getenv(operatorconfig.InstallPrometheus))
+	globalRes            = []*unstructured.Unstructured{}
+)
+
+const (
+	obAddonName       = "observability-addon"
+	mcoCRName         = "observability"
+	ownerLabelKey     = "owner"
+	ownerLabelValue   = "observabilityaddon"
+	obsAddonFinalizer = "observability.open-cluster-management.io/addon-cleanup"
+	promSvcName       = "prometheus-k8s"
+	promNamespace     = "openshift-monitoring"
+)
+
+var (
+	namespace    = os.Getenv("WATCH_NAMESPACE")
+	hubNamespace = os.Getenv("HUB_NAMESPACE")
+)
+
+// ObservabilityAddonReconciler reconciles an ObservabilityAddon object
+type ObservabilityAddonReconciler struct {
+	Client    client.Client
+	Scheme    *runtime.Scheme
+	HubClient client.Client
+}
+
+// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=observabilityaddons,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=observabilityaddons/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=observabilityaddons/finalizers,verbs=update
+
+// Reconcile reads the state of the cluster for an ObservabilityAddon object and makes changes based on the state read
+// and what is in the ObservabilityAddon.Spec.
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) + log.Info("Reconciling") + + // Fetch the ObservabilityAddon instance in hub cluster + hubObsAddon := &oav1beta1.ObservabilityAddon{} + err := r.HubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon) + if err != nil { + log.Error(err, "Failed to get observabilityaddon", "namespace", hubNamespace) + return ctrl.Result{}, err + } + + // Fetch the ObservabilityAddon instance in local cluster + obsAddon := &oav1beta1.ObservabilityAddon{} + err = r.Client.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: namespace}, obsAddon) + if err != nil { + if errors.IsNotFound(err) { + obsAddon = nil + } else { + log.Error(err, "Failed to get observabilityaddon", "namespace", namespace) + return ctrl.Result{}, err + } + } + + // Init finalizers + deleteFlag := false + if obsAddon == nil { + deleteFlag = true + } + deleted, err := r.initFinalization(ctx, deleteFlag, hubObsAddon) + if err != nil { + return ctrl.Result{}, err + } + if deleted || deleteFlag { + return ctrl.Result{}, nil + } + + // retrieve the hubInfo + hubSecret := &corev1.Secret{} + err = r.Client.Get(ctx, types.NamespacedName{Name: operatorconfig.HubInfoSecretName, Namespace: namespace}, hubSecret) + if err != nil { + return ctrl.Result{}, err + } + hubInfo := &operatorconfig.HubInfo{} + err = yaml.Unmarshal(hubSecret.Data[operatorconfig.HubInfoSecretKey], &hubInfo) + if err != nil { + log.Error(err, "Failed to unmarshal hub info") + return ctrl.Result{}, err + } + hubInfo.ClusterName = string(hubSecret.Data[operatorconfig.ClusterNameKey]) + + clusterType := "" + clusterID := "" + + //read the image configmap + imagesCM := &corev1.ConfigMap{} + err = r.Client.Get(ctx, types.NamespacedName{Name: operatorconfig.ImageConfigMap, + Namespace: namespace}, imagesCM) + if err != nil { + log.Error(err, "Failed to get images configmap") + return ctrl.Result{}, err + } + rendering.Images = imagesCM.Data + + if !installPrometheus { + // If no prometheus service found, set status as NotSupported + promSvc := &corev1.Service{} + err = r.Client.Get(ctx, types.NamespacedName{Name: promSvcName, + Namespace: promNamespace}, promSvc) + if err != nil { + if errors.IsNotFound(err) { + log.Error(err, "OCP prometheus service does not exist") + util.ReportStatus(ctx, r.Client, obsAddon, "NotSupported") + return ctrl.Result{}, nil + } + log.Error(err, "Failed to check prometheus resource") + return ctrl.Result{}, err + } + + clusterID, err = getClusterID(ctx, r.Client) + if err != nil { + // OCP 3.11 has no cluster id, set it as empty string + clusterID = "" + // to differentiate ocp 3.x + clusterType = "ocp3" + } + isSNO, err := isSNO(ctx, r.Client) + if err == nil && isSNO { + clusterType = "SNO" + } + err = createMonitoringClusterRoleBinding(ctx, r.Client) + if err != nil { + return ctrl.Result{}, err + } + err = createCAConfigmap(ctx, r.Client) + if err != nil { + return ctrl.Result{}, err + } + } else { + //Render the prometheus templates + renderer := rendererutil.NewRenderer() + toDeploy, err := rendering.Render(renderer, r.Client, hubInfo) + if err != nil { + log.Error(err, "Failed to render prometheus templates") + return ctrl.Result{}, err + } + deployer := deploying.NewDeployer(r.Client) + for _, res := range toDeploy { + if err := controllerutil.SetControllerReference(obsAddon, res, r.Scheme); err != nil 
{ + log.Info("Failed to set controller reference", "resource", res.GetName()) + globalRes = append(globalRes, res) + } + if err := deployer.Deploy(res); err != nil { + log.Error(err, fmt.Sprintf("Failed to deploy %s %s/%s", + res.GetKind(), namespace, res.GetName())) + return ctrl.Result{}, err + } + } + } + + // create or update the cluster-monitoring-config configmap and relevant resources + if err := createOrUpdateClusterMonitoringConfig(ctx, hubInfo, clusterID, r.Client, installPrometheus); err != nil { + return ctrl.Result{}, err + } + + if obsAddon.Spec.EnableMetrics { + forceRestart := false + if req.Name == mtlsCertName || req.Name == mtlsCaName || req.Name == caConfigmapName { + forceRestart = true + } + created, err := updateMetricsCollector(ctx, r.Client, obsAddon.Spec, *hubInfo, clusterID, clusterType, 1, forceRestart) + if err != nil { + util.ReportStatus(ctx, r.Client, obsAddon, "Degraded") + return ctrl.Result{}, err + } + if created { + util.ReportStatus(ctx, r.Client, obsAddon, "Deployed") + } + } else { + deleted, err := updateMetricsCollector(ctx, r.Client, obsAddon.Spec, *hubInfo, clusterID, clusterType, 0, false) + if err != nil { + return ctrl.Result{}, err + } + if deleted { + util.ReportStatus(ctx, r.Client, obsAddon, "Disabled") + } + } + + //TODO: UPDATE + return ctrl.Result{}, nil +} + +func (r *ObservabilityAddonReconciler) initFinalization( + ctx context.Context, delete bool, hubObsAddon *oav1beta1.ObservabilityAddon) (bool, error) { + if delete && contains(hubObsAddon.GetFinalizers(), obsAddonFinalizer) { + log.Info("To clean observability components/configurations in the cluster") + err := deleteMetricsCollector(ctx, r.Client) + if err != nil { + return false, err + } + + // revert the change to cluster monitoring stack + err = revertClusterMonitoringConfig(ctx, r.Client, installPrometheus) + if err != nil { + return false, err + } + + // Should we return bool from the delete functions for crb and cm? What is it used for? Should we use the bool before removing finalizer? + // SHould we return true if metricscollector is not found as that means metrics collector is not present? + // Moved this part up as we need to clean up cm and crb before we remove the finalizer - is that the right way to do it? 
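+		// Current cleanup order: the metrics collector and the cluster monitoring
+		// configuration are reverted first, then the OCP-only ClusterRoleBinding and
+		// CA ConfigMap (or, for *KS clusters, the rendered global resources), and
+		// only after that is the finalizer removed from the hub-side resource.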
+ if !installPrometheus { + err = deleteMonitoringClusterRoleBinding(ctx, r.Client) + if err != nil { + return false, err + } + err = deleteCAConfigmap(ctx, r.Client) + if err != nil { + return false, err + } + } else { + // delete resources which is not namespace scoped or located in other namespaces + for _, res := range globalRes { + err = r.Client.Delete(context.TODO(), res) + if err != nil && !errors.IsNotFound(err) { + return false, err + } + } + } + hubObsAddon.SetFinalizers(remove(hubObsAddon.GetFinalizers(), obsAddonFinalizer)) + err = r.HubClient.Update(ctx, hubObsAddon) + if err != nil { + log.Error(err, "Failed to remove finalizer to observabilityaddon", "namespace", hubObsAddon.Namespace) + return false, err + } + log.Info("Finalizer removed from observabilityaddon resource") + return true, nil + } + if !contains(hubObsAddon.GetFinalizers(), obsAddonFinalizer) { + hubObsAddon.SetFinalizers(append(hubObsAddon.GetFinalizers(), obsAddonFinalizer)) + err := r.HubClient.Update(ctx, hubObsAddon) + if err != nil { + log.Error(err, "Failed to add finalizer to observabilityaddon", "namespace", hubObsAddon.Namespace) + return false, err + } + log.Info("Finalizer added to observabilityaddon resource") + } + return false, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ObservabilityAddonReconciler) SetupWithManager(mgr ctrl.Manager) error { + if os.Getenv("NAMESPACE") != "" { + namespace = os.Getenv("NAMESPACE") + } + return ctrl.NewControllerManagedBy(mgr). + For( + &oav1beta1.ObservabilityAddon{}, + builder.WithPredicates(getPred(obAddonName, namespace, true, true, true)), + ). + Watches( + &source.Kind{Type: &corev1.Secret{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(operatorconfig.HubInfoSecretName, namespace, true, true, false)), + ). + Watches( + &source.Kind{Type: &corev1.Secret{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(mtlsCertName, namespace, true, true, false)), + ). + Watches( + &source.Kind{Type: &corev1.Secret{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(mtlsCaName, namespace, true, true, false)), + ). + Watches( + &source.Kind{Type: &corev1.Secret{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(hubAmAccessorSecretName, namespace, true, true, false)), + ). + Watches( + &source.Kind{Type: &corev1.ConfigMap{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(operatorconfig.AllowlistConfigMapName, namespace, true, true, false)), + ). + Watches( + &source.Kind{Type: &corev1.ConfigMap{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(caConfigmapName, namespace, false, true, true)), + ). + Watches( + &source.Kind{Type: &appsv1.Deployment{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(metricsCollectorName, namespace, true, true, true)), + ). + Watches( + &source.Kind{Type: &rbacv1.ClusterRoleBinding{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(clusterRoleBindingName, "", false, true, true)), + ). + Watches( + &source.Kind{Type: &corev1.ConfigMap{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(getPred(operatorconfig.ImageConfigMap, namespace, true, true, false)), + ). 
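+		// Every input that feeds the collector is watched above: the hub info,
+		// mTLS and Alertmanager accessor secrets, the allowlist, CA and image
+		// ConfigMaps, the collector Deployment itself and the ClusterRoleBinding,
+		// so a change to any of them re-triggers Reconcile.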
+ Complete(r) +} + +func contains(list []string, s string) bool { + for _, v := range list { + if v == s { + return true + } + } + return false +} + +func remove(list []string, s string) []string { + result := []string{} + for _, v := range list { + if v != s { + result = append(result, v) + } + } + return result +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go new file mode 100644 index 000000000..13c4bcbb4 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller_test.go @@ -0,0 +1,343 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. +package observabilityendpoint + +import ( + "context" + "strings" + "testing" + + ocinfrav1 "github.com/openshift/api/config/v1" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +const ( + name = "observability-addon" + testNamespace = "test-ns" + testHubNamspace = "test-hub-ns" + testBearerToken = "test-bearer-token" +) + +func newObservabilityAddon(name string, ns string) *oav1beta1.ObservabilityAddon { + return &oav1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: oashared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 60, + }, + } +} + +func newPromSvc() *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: promSvcName, + Namespace: promNamespace, + }, + } +} + +func newHubInfoSecret(data []byte) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorconfig.HubInfoSecretName, + Namespace: testNamespace, + }, + Data: map[string][]byte{ + operatorconfig.HubInfoSecretKey: data, + operatorconfig.ClusterNameKey: []byte("test-cluster"), + }, + } +} + +func newAMAccessorSecret() *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: hubAmAccessorSecretName, + Namespace: testNamespace, + }, + Data: map[string][]byte{ + "token": []byte(testBearerToken), + }, + } +} + +func newClusterMonitoringConfigCM(configDataStr string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterMonitoringConfigName, + Namespace: promNamespace, + }, + Data: map[string]string{ + clusterMonitoringConfigDataKey: configDataStr, + }, + } +} + +func newImagesCM() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorconfig.ImageConfigMap, + Namespace: testNamespace, + }, + Data: map[string]string{ + operatorconfig.MetricsCollectorKey: "metrics-collector-image", + }, + } +} + +func init() { + s := scheme.Scheme + addonv1alpha1.AddToScheme(s) + 
oav1beta1.AddToScheme(s) + ocinfrav1.AddToScheme(s) + + namespace = testNamespace + hubNamespace = testHubNamspace +} + +func TestObservabilityAddonController(t *testing.T) { + hubInfoData := []byte(` +endpoint: "http://test-endpoint" +alertmanager-endpoint: "http://test-alertamanger-endpoint" +alertmanager-router-ca: | + -----BEGIN CERTIFICATE----- + xxxxxxxxxxxxxxxxxxxxxxxxxxx + -----END CERTIFICATE----- +`) + + hubObjs := []runtime.Object{} + hubInfo := newHubInfoSecret(hubInfoData) + amAccessSrt := newAMAccessorSecret() + allowList := getAllowlistCM() + images := newImagesCM() + objs := []runtime.Object{hubInfo, amAccessSrt, allowList, images, cv, infra} + + hubClient := fake.NewFakeClient(hubObjs...) + c := fake.NewFakeClient(objs...) + + r := &ObservabilityAddonReconciler{ + Client: c, + HubClient: hubClient, + } + + // test error in reconcile if missing obervabilityaddon + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "install", + Namespace: testNamespace, + }, + } + ctx := context.TODO() + _, err := r.Reconcile(ctx, req) + if err == nil { + t.Fatalf("reconcile: miss the error for missing obervabilityaddon") + } + + // test reconcile w/o prometheus-k8s svc + err = hubClient.Create(ctx, newObservabilityAddon(name, testHubNamspace)) + if err != nil { + t.Fatalf("failed to create hub oba to install: (%v)", err) + } + oba := newObservabilityAddon(name, testNamespace) + err = c.Create(ctx, oba) + if err != nil { + t.Fatalf("failed to create oba to install: (%v)", err) + } + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "install", + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + // test reconcile successfully with all resources installed and finalizer set + promSvc := newPromSvc() + err = c.Create(ctx, promSvc) + if err != nil { + t.Fatalf("failed to create prom svc to install: (%v)", err) + } + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "install", + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + rb := &rbacv1.ClusterRoleBinding{} + err = c.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName, + Namespace: ""}, rb) + if err != nil { + t.Fatalf("Required clusterrolebinding not created: (%v)", err) + } + cm := &corev1.ConfigMap{} + err = c.Get(ctx, types.NamespacedName{Name: caConfigmapName, + Namespace: namespace}, cm) + if err != nil { + t.Fatalf("Required configmap not created: (%v)", err) + } + deploy := &appv1.Deployment{} + err = c.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, deploy) + if err != nil { + t.Fatalf("Metrics collector deployment not created: (%v)", err) + } + foundOba := &oav1beta1.ObservabilityAddon{} + err = hubClient.Get(ctx, types.NamespacedName{Name: obAddonName, + Namespace: hubNamespace}, foundOba) + if err != nil { + t.Fatalf("Failed to get observabilityAddon: (%v)", err) + } + if !contains(foundOba.Finalizers, obsAddonFinalizer) { + t.Fatal("Finalizer not set in observabilityAddon") + } + + // test reconcile w/o clusterversion(OCP 3.11) + c.Delete(ctx, cv) + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "install", + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + err = c.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, deploy) + if err != nil { + 
t.Fatalf("Metrics collector deployment not created: (%v)", err) + } + commands := deploy.Spec.Template.Spec.Containers[0].Command + for _, cmd := range commands { + if strings.Contains(cmd, "clusterID=") && !strings.Contains(cmd, "test-cluster") { + t.Fatalf("Found wrong clusterID in command: (%s)", cmd) + } + } + + // test reconcile metrics collector deployment updated if cert secret updated + found := &appv1.Deployment{} + err = c.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, found) + if err != nil { + t.Fatalf("Metrics collector deployment not found: (%v)", err) + } + found.Status.ReadyReplicas = 1 + err = c.Update(ctx, found) + if err != nil { + t.Fatalf("Failed to update metrics collector deployment: (%v)", err) + } + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: mtlsCertName, + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err != nil { + t.Fatalf("reconcile for update: (%v)", err) + } + err = c.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, deploy) + if err != nil { + t.Fatalf("Metrics collector deployment not found: (%v)", err) + } + if deploy.Spec.Template.ObjectMeta.Labels[restartLabel] == "" { + t.Fatal("Deployment not updated") + } + + // test reconcile metrics collector's replicas set to 0 if observability disabled + err = c.Delete(ctx, oba) + if err != nil { + t.Fatalf("failed to delete obsaddon to disable: (%v)", err) + } + oba = newObservabilityAddon(name, testNamespace) + oba.Spec.EnableMetrics = false + err = c.Create(ctx, oba) + if err != nil { + t.Fatalf("failed to create obsaddon to disable: (%v)", err) + } + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "disable", + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err != nil { + t.Fatalf("reconcile for disable: (%v)", err) + } + err = c.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, deploy) + if err != nil { + t.Fatalf("Metrics collector deployment not created: (%v)", err) + } + if *deploy.Spec.Replicas != 0 { + t.Fatalf("Replicas for metrics collector deployment is not set as 0, value is (%d)", *deploy.Spec.Replicas) + } + + // test reconcile all resources and finalizer are removed + err = c.Delete(ctx, oba) + if err != nil { + t.Fatalf("failed to delete obsaddon to delete: (%v)", err) + } + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "delete", + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err != nil { + t.Fatalf("reconcile for delete: (%v)", err) + } + err = c.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName, + Namespace: ""}, rb) + if !errors.IsNotFound(err) { + t.Fatalf("Required clusterrolebinding not deleted") + } + err = c.Get(ctx, types.NamespacedName{Name: caConfigmapName, + Namespace: namespace}, cm) + if !errors.IsNotFound(err) { + t.Fatalf("Required configmap not deleted") + } + err = c.Get(ctx, types.NamespacedName{Name: metricsCollectorName, + Namespace: namespace}, deploy) + if !errors.IsNotFound(err) { + t.Fatalf("Metrics collector deployment not deleted") + } + foundOba1 := &oav1beta1.ObservabilityAddon{} + err = hubClient.Get(ctx, types.NamespacedName{Name: obAddonName, + Namespace: hubNamespace}, foundOba1) + if err != nil { + t.Fatalf("Failed to get observabilityAddon: (%v)", err) + } + if contains(foundOba1.Finalizers, obsAddonFinalizer) { + t.Fatal("Finalizer not removed from observabilityAddon") + } +} diff --git 
a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go new file mode 100644 index 000000000..800b5bec3 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go @@ -0,0 +1,502 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. +package observabilityendpoint + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/ghodss/yaml" + cmomanifests "github.com/openshift/cluster-monitoring-operator/pkg/manifests" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" +) + +const ( + hubAmAccessorSecretName = "observability-alertmanager-accessor" // #nosec + hubAmAccessorSecretKey = "token" + hubAmRouterCASecretName = "hub-alertmanager-router-ca" + hubAmRouterCASecretKey = "service-ca.crt" + clusterMonitoringConfigName = "cluster-monitoring-config" + clusterMonitoringConfigDataKey = "config.yaml" + clusterLabelKeyForAlerts = "cluster" +) + +// createHubAmRouterCASecret creates the secret that contains CA of the Hub's Alertmanager Route +func createHubAmRouterCASecret(ctx context.Context, hubInfo *operatorconfig.HubInfo, client client.Client, targetNamespace string) error { + hubAmRouterCA := hubInfo.AlertmanagerRouterCA + dataMap := map[string][]byte{hubAmRouterCASecretKey: []byte(hubAmRouterCA)} + hubAmRouterCASecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: hubAmRouterCASecretName, + Namespace: targetNamespace, + }, + Data: dataMap, + } + + found := &corev1.Secret{} + err := client.Get(ctx, types.NamespacedName{Name: hubAmRouterCASecretName, + Namespace: targetNamespace}, found) + if err != nil { + if errors.IsNotFound(err) { + err = client.Create(ctx, hubAmRouterCASecret) + if err != nil { + log.Error(err, "failed to create the hub-alertmanager-router-ca secret") + return err + } + log.Info("the hub-alertmanager-router-ca secret is created") + return nil + } else { + log.Error(err, "failed to check the hub-alertmanager-router-ca secret") + return err + } + } + + log.Info("the hub-alertmanager-router-ca secret already exists, check if it needs to be updated") + if reflect.DeepEqual(found.Data, dataMap) { + log.Info("no change for the hub-alertmanager-router-ca secret") + return nil + } else { + err = client.Update(ctx, hubAmRouterCASecret) + if err != nil { + log.Error(err, "failed to update the hub-alertmanager-router-ca secret") + return nil + } + log.Info("the hub-alertmanager-router-ca secret is updated") + return err + } +} + +// deleteHubAmRouterCASecret deletes the secret that contains CA of the Hub's Alertmanager Route +func deleteHubAmRouterCASecret(ctx context.Context, client client.Client, targetNamespace string) error { + found := &corev1.Secret{} + err := client.Get(ctx, types.NamespacedName{Name: hubAmRouterCASecretName, + Namespace: targetNamespace}, found) + if err != nil { + if errors.IsNotFound(err) { + log.Info("the hub-alertmanager-router-ca secret is already deleted") + return nil + } + log.Error(err, "failed to check the hub-alertmanager-router-ca secret") + return err + } + err = client.Delete(ctx, found) + if err != nil { + log.Error(err, "error deleting the 
hub-alertmanager-router-ca secret") + return err + } + log.Info("the hub-alertmanager-router-ca secret is deleted") + return nil +} + +// createHubAmAccessorTokenSecret creates the secret that contains access token of the Hub's Alertmanager +func createHubAmAccessorTokenSecret(ctx context.Context, client client.Client, targetNamespace string) error { + amAccessorToken, err := getAmAccessorToken(ctx, client) + if err != nil { + return fmt.Errorf("fail to get the alertmanager accessor token %v", err) + } + + dataMap := map[string][]byte{hubAmAccessorSecretKey: []byte(amAccessorToken)} + hubAmAccessorTokenSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: hubAmAccessorSecretName, + Namespace: targetNamespace, + }, + Data: dataMap, + } + + found := &corev1.Secret{} + err = client.Get(ctx, types.NamespacedName{Name: hubAmAccessorSecretName, + Namespace: targetNamespace}, found) + if err != nil { + if errors.IsNotFound(err) { + err = client.Create(ctx, hubAmAccessorTokenSecret) + if err != nil { + log.Error(err, "failed to create the observability-alertmanager-accessor secret") + return err + } + log.Info("the observability-alertmanager-accessor secret is created") + return nil + } else { + log.Error(err, "failed to check the observability-alertmanager-accessor secret") + return err + } + } + + log.Info("the observability-alertmanager-accessor secret already exists, check if it needs to be updated") + if reflect.DeepEqual(found.Data, dataMap) { + log.Info("no change for the observability-alertmanager-accessor secret") + return nil + } else { + err = client.Update(ctx, hubAmAccessorTokenSecret) + if err != nil { + log.Error(err, "failed to update the observability-alertmanager-accessor secret") + return nil + } + log.Info("the observability-alertmanager-accessor secret is updated") + return err + } +} + +// deleteHubAmAccessorTokenSecret deletes the secret that contains access token of the Hub's Alertmanager +func deleteHubAmAccessorTokenSecret(ctx context.Context, client client.Client, targetNamespace string) error { + found := &corev1.Secret{} + err := client.Get(ctx, types.NamespacedName{Name: hubAmAccessorSecretName, + Namespace: targetNamespace}, found) + if err != nil { + if errors.IsNotFound(err) { + log.Info("the observability-alertmanager-accessor secret is already deleted") + return nil + } + log.Error(err, "failed to check the observability-alertmanager-accessor secret") + return err + } + err = client.Delete(ctx, found) + if err != nil { + log.Error(err, "error deleting the observability-alertmanager-accessor secret") + return err + } + log.Info("the observability-alertmanager-accessor secret is deleted") + return nil +} + +// getAmAccessorToken retrieves the alertmanager access token from observability-alertmanager-accessor secret +func getAmAccessorToken(ctx context.Context, client client.Client) (string, error) { + amAccessorSecret := &corev1.Secret{} + if err := client.Get(ctx, types.NamespacedName{Name: hubAmAccessorSecretName, + Namespace: namespace}, amAccessorSecret); err != nil { + return "", err + } + + amAccessorToken := amAccessorSecret.Data[hubAmAccessorSecretKey] + if amAccessorToken == nil { + return "", fmt.Errorf("no token in secret %s", hubAmAccessorSecretName) + } + + return string(amAccessorToken), nil +} + +func newAdditionalAlertmanagerConfig(hubInfo *operatorconfig.HubInfo) cmomanifests.AdditionalAlertmanagerConfig { + return cmomanifests.AdditionalAlertmanagerConfig{ + Scheme: "https", + PathPrefix: "/", + APIVersion: "v2", + TLSConfig: 
cmomanifests.TLSConfig{ + CA: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: hubAmRouterCASecretName, + }, + Key: hubAmRouterCASecretKey, + }, + InsecureSkipVerify: false, + }, + BearerToken: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: hubAmAccessorSecretName, + }, + Key: hubAmAccessorSecretKey, + }, + StaticConfigs: []string{strings.TrimLeft(hubInfo.AlertmanagerEndpoint, "https://")}, + } +} + +// createOrUpdateClusterMonitoringConfig creates or updates the configmap +// cluster-monitoring-config and relevant resources (observability-alertmanager-accessor +// and hub-alertmanager-router-ca) for the openshift cluster monitoring stack +func createOrUpdateClusterMonitoringConfig( + ctx context.Context, + hubInfo *operatorconfig.HubInfo, + clusterID string, + client client.Client, + installProm bool) error { + targetNamespace := promNamespace + if installProm { + // for *KS, the hub CA and alertmanager access token should be created in namespace: open-cluster-management-addon-observability + targetNamespace = namespace + } + + // create the hub-alertmanager-router-ca secret if it doesn't exist or update it if needed + if err := createHubAmRouterCASecret(ctx, hubInfo, client, targetNamespace); err != nil { + log.Error(err, "failed to create or update the hub-alertmanager-router-ca secret") + return err + } + + // create the observability-alertmanager-accessor secret if it doesn't exist or update it if needed + if err := createHubAmAccessorTokenSecret(ctx, client, targetNamespace); err != nil { + log.Error(err, "failed to create or update the observability-alertmanager-accessor secret") + return err + } + + if installProm { + // no need to create configmap cluster-monitoring-config for *KS + return nil + } + + // init the prometheus k8s config + newExternalLabels := map[string]string{clusterLabelKeyForAlerts: clusterID} + newAlertmanagerConfigs := []cmomanifests.AdditionalAlertmanagerConfig{newAdditionalAlertmanagerConfig(hubInfo)} + newPmK8sConfig := &cmomanifests.PrometheusK8sConfig{ + // add cluster label for alerts from managed cluster + ExternalLabels: newExternalLabels, + // add alertmanager configs + AlertmanagerConfigs: newAlertmanagerConfigs, + } + + // root for CMO configuration + newClusterMonitoringConfiguration := cmomanifests.ClusterMonitoringConfiguration{ + PrometheusK8sConfig: newPmK8sConfig, + } + + // marshal new CMO configuration to json then to yaml + newClusterMonitoringConfigurationJSONBytes, err := json.Marshal(newClusterMonitoringConfiguration) + if err != nil { + log.Error(err, "failed to marshal the cluster monitoring config") + return err + } + newClusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML(newClusterMonitoringConfigurationJSONBytes) + if err != nil { + log.Error(err, "failed to transform JSON to YAML", "JSON", newClusterMonitoringConfigurationJSONBytes) + return err + } + + newCusterMonitoringConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterMonitoringConfigName, + Namespace: promNamespace, + }, + Data: map[string]string{clusterMonitoringConfigDataKey: string(newClusterMonitoringConfigurationYAMLBytes)}, + } + + // try to retrieve the current configmap in the cluster + found := &corev1.ConfigMap{} + err = client.Get(ctx, types.NamespacedName{Name: clusterMonitoringConfigName, + Namespace: promNamespace}, found) + if err != nil { + if errors.IsNotFound(err) { + log.Info("configmap not found, try to create it", "name", 
clusterMonitoringConfigName) + err = client.Create(ctx, newCusterMonitoringConfigMap) + if err != nil { + log.Error(err, "failed to create configmap", "name", clusterMonitoringConfigName) + return err + } + log.Info("configmap created", "name", clusterMonitoringConfigName) + return nil + } else { + log.Error(err, "failed to check configmap", "name", clusterMonitoringConfigName) + return err + } + } + + log.Info("configmap already exists, check if it needs update", "name", clusterMonitoringConfigName) + foundClusterMonitoringConfigurationYAMLString, ok := found.Data[clusterMonitoringConfigDataKey] + if !ok { + log.Info("configmap data doesn't contain key, try to update it", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + // replace config.yaml in configmap + found.Data[clusterMonitoringConfigDataKey] = string(newClusterMonitoringConfigurationYAMLBytes) + err = client.Update(ctx, found) + if err != nil { + log.Error(err, "failed to update configmap", "name", clusterMonitoringConfigName) + return err + } + log.Info("configmap updated", "name", clusterMonitoringConfigName) + return nil + } + + log.Info("configmap already exists and key config.yaml exists, check if the value needs update", + "name", clusterMonitoringConfigName, + "key", clusterMonitoringConfigDataKey) + foundClusterMonitoringConfigurationJSONBytes, err := yaml.YAMLToJSON([]byte(foundClusterMonitoringConfigurationYAMLString)) + if err != nil { + log.Error(err, "failed to transform YAML to JSON", "YAML", foundClusterMonitoringConfigurationYAMLString) + return err + } + foundClusterMonitoringConfiguration := &cmomanifests.ClusterMonitoringConfiguration{} + if err := json.Unmarshal([]byte(foundClusterMonitoringConfigurationJSONBytes), foundClusterMonitoringConfiguration); err != nil { + log.Error(err, "failed to marshal the cluster monitoring config") + return err + } + + if foundClusterMonitoringConfiguration.PrometheusK8sConfig == nil { + foundClusterMonitoringConfiguration.PrometheusK8sConfig = newPmK8sConfig + } else { + // check if externalLabels exists + if foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels == nil { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels = newExternalLabels + } else { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels[clusterLabelKeyForAlerts] = clusterID + } + + // check if alertmanagerConfigs exists + if foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs == nil { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs = newAlertmanagerConfigs + } else { + additionalAlertmanagerConfigExists := false + for _, v := range foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs { + if v.TLSConfig != (cmomanifests.TLSConfig{}) && + v.TLSConfig.CA != nil && + v.TLSConfig.CA.LocalObjectReference != (corev1.LocalObjectReference{}) && + v.TLSConfig.CA.LocalObjectReference.Name == hubAmRouterCASecretName { + additionalAlertmanagerConfigExists = true + break + } + } + if !additionalAlertmanagerConfigExists { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs = append( + foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs, + newAdditionalAlertmanagerConfig(hubInfo)) + } + } + } + + // prepare to write back the cluster monitoring configuration + updatedClusterMonitoringConfigurationJSONBytes, err := json.Marshal(foundClusterMonitoringConfiguration) + if err != nil { + log.Error(err, "failed to marshal 
the cluster monitoring config") + return err + } + updatedclusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML(updatedClusterMonitoringConfigurationJSONBytes) + if err != nil { + log.Error(err, "failed to transform JSON to YAML", "JSON", updatedClusterMonitoringConfigurationJSONBytes) + return err + } + found.Data[clusterMonitoringConfigDataKey] = string(updatedclusterMonitoringConfigurationYAMLBytes) + err = client.Update(ctx, found) + if err != nil { + log.Error(err, "failed to update configmap", "name", clusterMonitoringConfigName) + return err + } + log.Info("configmap updated", "name", clusterMonitoringConfigName) + return nil +} + +// revertClusterMonitoringConfig reverts the configmap cluster-monitoring-config and relevant resources +// (observability-alertmanager-accessor and hub-alertmanager-router-ca) for the openshift cluster monitoring stack +func revertClusterMonitoringConfig(ctx context.Context, client client.Client, installProm bool) error { + targetNamespace := promNamespace + if installProm { + // for *KS, the hub CA and alertmanager access token are not created in namespace: open-cluster-management-addon-observability + targetNamespace = namespace + } + + // delete the hub-alertmanager-router-ca secret + if err := deleteHubAmRouterCASecret(ctx, client, targetNamespace); err != nil { + log.Error(err, "failed to delete the hub-alertmanager-router-ca secret") + return err + } + + // delete the observability-alertmanager-accessor secret + if err := deleteHubAmAccessorTokenSecret(ctx, client, targetNamespace); err != nil { + log.Error(err, "failed to delete the observability-alertmanager-accessor secret") + return err + } + + // try to retrieve the current configmap in the cluster + found := &corev1.ConfigMap{} + err := client.Get(ctx, types.NamespacedName{Name: clusterMonitoringConfigName, + Namespace: promNamespace}, found) + if err != nil { + if errors.IsNotFound(err) { + log.Info("configmap not found, no need action", "name", clusterMonitoringConfigName) + return nil + } else { + log.Error(err, "failed to check configmap", "name", clusterMonitoringConfigName) + return err + } + } + + // revert the existing cluster-monitor-config configmap + log.Info("configmap exists, check if it needs revert", "name", clusterMonitoringConfigName) + foundClusterMonitoringConfigurationYAML, ok := found.Data[clusterMonitoringConfigDataKey] + if !ok { + log.Info("configmap data doesn't contain key, no need action", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + return nil + } + foundClusterMonitoringConfigurationJSON, err := yaml.YAMLToJSON([]byte(foundClusterMonitoringConfigurationYAML)) + if err != nil { + log.Error(err, "failed to transform YAML to JSON", "YAML", foundClusterMonitoringConfigurationYAML) + return err + } + + log.Info("configmap exists and key config.yaml exists, check if the value needs revert", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + foundClusterMonitoringConfiguration := &cmomanifests.ClusterMonitoringConfiguration{} + if err := json.Unmarshal([]byte(foundClusterMonitoringConfigurationJSON), foundClusterMonitoringConfiguration); err != nil { + log.Error(err, "failed to marshal the cluster monitoring config") + return err + } + + if foundClusterMonitoringConfiguration.PrometheusK8sConfig == nil { + log.Info("configmap data doesn't key: prometheusK8s, no need action", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + return nil + } else { + // check if externalLabels 
exists + if foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels != nil { + if _, ok := foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels[clusterLabelKeyForAlerts]; ok { + delete(foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels, clusterLabelKeyForAlerts) + } + if len(foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels) == 0 { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels = nil + } + } + + // check if alertmanagerConfigs exists + if foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs != nil { + copiedAlertmanagerConfigs := make([]cmomanifests.AdditionalAlertmanagerConfig, 0) + for _, v := range foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs { + if v.TLSConfig == (cmomanifests.TLSConfig{}) || + v.TLSConfig.CA == nil || + v.TLSConfig.CA.LocalObjectReference == (corev1.LocalObjectReference{}) || + v.TLSConfig.CA.LocalObjectReference.Name != hubAmRouterCASecretName { + copiedAlertmanagerConfigs = append(copiedAlertmanagerConfigs, v) + } + } + if len(copiedAlertmanagerConfigs) == 0 { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs = nil + if reflect.DeepEqual(*foundClusterMonitoringConfiguration.PrometheusK8sConfig, cmomanifests.PrometheusK8sConfig{}) { + foundClusterMonitoringConfiguration.PrometheusK8sConfig = nil + } + } else { + foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs = copiedAlertmanagerConfigs + } + } + } + + // check if the foundClusterMonitoringConfiguration is empty ClusterMonitoringConfiguration + if reflect.DeepEqual(*foundClusterMonitoringConfiguration, cmomanifests.ClusterMonitoringConfiguration{}) { + log.Info("empty ClusterMonitoringConfiguration, should delete configmap", "name", clusterMonitoringConfigName) + err = client.Delete(ctx, found) + if err != nil { + log.Error(err, "failed to delete configmap", "name", clusterMonitoringConfigName) + return err + } + log.Info("configmap delete", "name", clusterMonitoringConfigName) + return nil + } + + // prepare to write back the cluster monitoring configuration + updatedClusterMonitoringConfigurationJSONBytes, err := json.Marshal(foundClusterMonitoringConfiguration) + if err != nil { + log.Error(err, "failed to marshal the cluster monitoring config") + return err + } + updatedClusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML(updatedClusterMonitoringConfigurationJSONBytes) + if err != nil { + log.Error(err, "failed to transform JSON to YAML", "JSON", updatedClusterMonitoringConfigurationJSONBytes) + return err + } + found.Data[clusterMonitoringConfigDataKey] = string(updatedClusterMonitoringConfigurationYAMLBytes) + err = client.Update(ctx, found) + if err != nil { + log.Error(err, "failed to update configmap", "name", clusterMonitoringConfigName) + return err + } + log.Info("configmap updated", "name", clusterMonitoringConfigName) + return nil +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config_test.go new file mode 100644 index 000000000..ac9f4ae15 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config_test.go @@ -0,0 +1,251 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
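+
+// Rough sketch (placeholder values only) of the config.yaml that
+// createOrUpdateClusterMonitoringConfig writes into the cluster-monitoring-config
+// ConfigMap; key spellings follow the fixture in TestClusterMonitoringConfig below:
+//
+//   prometheusK8s:
+//     externalLabels:
+//       cluster: <cluster-id>
+//     additionalAlertManagerConfigs:
+//     - scheme: https
+//       pathPrefix: /
+//       apiVersion: v2
+//       bearerToken:
+//         name: observability-alertmanager-accessor
+//         key: token
+//       tlsConfig:
+//         ca:
+//           name: hub-alertmanager-router-ca
+//           key: service-ca.crt
+//       staticConfigs:
+//       - <hub-alertmanager-host>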
+package observabilityendpoint + +import ( + "context" + "encoding/json" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + yamltool "github.com/ghodss/yaml" + cmomanifests "github.com/openshift/cluster-monitoring-operator/pkg/manifests" + "gopkg.in/yaml.v2" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" +) + +const ( + hubInfoYAML = ` +cluster-name: "test-cluster" +endpoint: "http://test-endpoint" +alertmanager-endpoint: "http://test-alertamanger-endpoint" +alertmanager-router-ca: | + -----BEGIN CERTIFICATE----- + xxxxxxxxxxxxxxxxxxxxxxxxxxx + -----END CERTIFICATE----- +` +) + +func TestCreateDeleteHubAmRouterCASecret(t *testing.T) { + hubInfo := &operatorconfig.HubInfo{} + err := yaml.Unmarshal([]byte(hubInfoYAML), &hubInfo) + if err != nil { + t.Fatalf("Failed to unmarshal hubInfo: (%v)", err) + } + + hubInfoObj := newHubInfoSecret([]byte(hubInfoYAML)) + objs := []runtime.Object{hubInfoObj} + + ctx := context.TODO() + c := fake.NewFakeClient(objs...) + err = createHubAmRouterCASecret(ctx, hubInfo, c, promNamespace) + if err != nil { + t.Fatalf("Failed to create the hub-alertmanager-router-ca secret: (%v)", err) + } + err = deleteHubAmRouterCASecret(ctx, c, promNamespace) + if err != nil { + t.Fatalf("Failed to delete the hub-alertmanager-router-ca secret: (%v)", err) + } + err = deleteHubAmRouterCASecret(ctx, c, promNamespace) + if err != nil { + t.Fatalf("Run into error when try to delete hub-alertmanager-router-ca secret twice: (%v)", err) + } +} + +func TestCreateDeleteHubAmAccessorTokenSecret(t *testing.T) { + amAccessSrt := newAMAccessorSecret() + objs := []runtime.Object{amAccessSrt} + + ctx := context.TODO() + c := fake.NewFakeClient(objs...) 
+ err := createHubAmAccessorTokenSecret(ctx, c, promNamespace) + if err != nil { + t.Fatalf("Failed to create the observability-alertmanager-accessor secret: (%v)", err) + } + err = deleteHubAmAccessorTokenSecret(ctx, c, promNamespace) + if err != nil { + t.Fatalf("Failed to delete the observability-alertmanager-accessor secret: (%v)", err) + } + err = deleteHubAmAccessorTokenSecret(ctx, c, promNamespace) + if err != nil { + t.Fatalf("Run into error when try to delete observability-alertmanager-accessor secret twice: (%v)", err) + } +} + +func TestClusterMonitoringConfig(t *testing.T) { + tests := []struct { + name string + ClusterMonitoringConfigCMExist bool + ClusterMonitoringConfigDataYaml string + ExpectedDeleteClusterMonitoringConfigCM bool + }{ + { + name: "no cluster-monitoring-config exists", + ClusterMonitoringConfigCMExist: false, + ExpectedDeleteClusterMonitoringConfigCM: true, + }, + { + name: "cluster-monitoring-config with empty config.yaml", + ClusterMonitoringConfigCMExist: true, + ClusterMonitoringConfigDataYaml: "", + ExpectedDeleteClusterMonitoringConfigCM: true, + }, + { + name: "cluster-monitoring-config with non-empty config.yaml and empty prometheusK8s", + ClusterMonitoringConfigCMExist: true, + ClusterMonitoringConfigDataYaml: ` +prometheusK8s: null`, + ExpectedDeleteClusterMonitoringConfigCM: true, + }, + { + name: "cluster-monitoring-config with non-empty config.yaml and prometheusK8s and empty additionalAlertManagerConfigs", + ClusterMonitoringConfigCMExist: true, + ClusterMonitoringConfigDataYaml: ` +prometheusK8s: + additionalAlertManagerConfigs: null`, + ExpectedDeleteClusterMonitoringConfigCM: true, + }, + { + name: "cluster-monitoring-config with non-empty config.yaml and prometheusK8s and additionalAlertManagerConfigs", + ClusterMonitoringConfigCMExist: true, + ClusterMonitoringConfigDataYaml: ` +prometheusK8s: + additionalAlertManagerConfigs: + - apiVersion: v2 + bearerToken: + key: token + name: foo + pathPrefix: / + scheme: https + staticConfigs: + - test-host.com + tlsConfig: + insecureSkipVerify: true`, + ExpectedDeleteClusterMonitoringConfigCM: false, + }, + } + + hubInfo := &operatorconfig.HubInfo{} + err := yaml.Unmarshal([]byte(hubInfoYAML), &hubInfo) + if err != nil { + t.Fatalf("Failed to unmarshal hubInfo: (%v)", err) + } + hubInfoObj := newHubInfoSecret([]byte(hubInfoYAML)) + amAccessSrt := newAMAccessorSecret() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{hubInfoObj, amAccessSrt} + if tt.ClusterMonitoringConfigCMExist { + objs = append(objs, newClusterMonitoringConfigCM(tt.ClusterMonitoringConfigDataYaml)) + } + testCreateOrUpdateClusterMonitoringConfig(t, hubInfo, fake.NewFakeClient(objs...), tt.ExpectedDeleteClusterMonitoringConfigCM) + }) + } +} + +func testCreateOrUpdateClusterMonitoringConfig(t *testing.T, hubInfo *operatorconfig.HubInfo, c client.Client, expectedCMDelete bool) { + ctx := context.TODO() + err := createOrUpdateClusterMonitoringConfig(ctx, hubInfo, testClusterID, c, false) + if err != nil { + t.Fatalf("Failed to create or update the cluster-monitoring-config configmap: (%v)", err) + } + + foundCusterMonitoringConfigMap := &corev1.ConfigMap{} + err = c.Get(ctx, types.NamespacedName{Name: clusterMonitoringConfigName, + Namespace: promNamespace}, foundCusterMonitoringConfigMap) + if err != nil { + t.Fatalf("failed to check configmap %s: %v", clusterMonitoringConfigName, err) + } + + foundClusterMonitoringConfigurationYAML, ok := 
foundCusterMonitoringConfigMap.Data[clusterMonitoringConfigDataKey] + if !ok { + t.Fatalf("configmap: %s doesn't contain key: %s", clusterMonitoringConfigName, clusterMonitoringConfigDataKey) + } + foundClusterMonitoringConfigurationJSON, err := yamltool.YAMLToJSON([]byte(foundClusterMonitoringConfigurationYAML)) + if err != nil { + t.Fatalf("failed to transform YAML to JSON:\n%s\n", foundClusterMonitoringConfigurationYAML) + } + + foundClusterMonitoringConfiguration := &cmomanifests.ClusterMonitoringConfiguration{} + if err := json.Unmarshal([]byte(foundClusterMonitoringConfigurationJSON), foundClusterMonitoringConfiguration); err != nil { + t.Fatalf("failed to marshal the cluster monitoring config: %v:\n%s\n", err, foundClusterMonitoringConfigurationJSON) + } + + if foundClusterMonitoringConfiguration.PrometheusK8sConfig == nil { + t.Fatalf("empty prometheusK8s in ClusterMonitoringConfiguration: %v", foundClusterMonitoringConfiguration) + } + + if foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs == nil { + t.Fatalf("empty AlertmanagerConfigs in ClusterMonitoringConfiguration.PrometheusK8sConfig: %v", foundClusterMonitoringConfiguration) + } + + containsOCMAlertmanagerConfig := false + for _, v := range foundClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs { + if v.TLSConfig != (cmomanifests.TLSConfig{}) && + v.TLSConfig.CA != nil && + v.TLSConfig.CA.LocalObjectReference != (corev1.LocalObjectReference{}) && + v.TLSConfig.CA.LocalObjectReference.Name == hubAmRouterCASecretName && + v.BearerToken != nil && + v.BearerToken.LocalObjectReference != (corev1.LocalObjectReference{}) && + v.BearerToken.LocalObjectReference.Name == hubAmAccessorSecretName { + containsOCMAlertmanagerConfig = true + foundHubAmAccessorSecret := &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: v.BearerToken.LocalObjectReference.Name, + Namespace: promNamespace}, foundHubAmAccessorSecret) + if err != nil { + t.Fatalf("failed to check the observability-alertmanager-accessor secret %s: %v", clusterMonitoringConfigName, err) + } + foundAmAccessorToken, ok := foundHubAmAccessorSecret.Data[hubAmAccessorSecretKey] + if !ok { + t.Fatalf("no key %s found in the observability-alertmanager-accessor secret", hubAmAccessorSecretKey) + } + if string(foundAmAccessorToken) != testBearerToken { + t.Fatalf("incorrect token found in the observability-alertmanager-accessor secret, got token: %s, expected value %s", foundAmAccessorToken, testBearerToken) + } + } + } + + if containsOCMAlertmanagerConfig == false { + t.Fatalf("no AlertmanagerConfig for OCM in ClusterMonitoringConfiguration.PrometheusK8sConfig.AlertmanagerConfigs: %v", foundClusterMonitoringConfiguration) + } + + err = revertClusterMonitoringConfig(ctx, c, false) + if err != nil { + t.Fatalf("Failed to revert cluster-monitoring-config configmap: (%v)", err) + } + + err = c.Get(ctx, types.NamespacedName{Name: clusterMonitoringConfigName, + Namespace: promNamespace}, foundCusterMonitoringConfigMap) + if expectedCMDelete { + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("the configmap %s should be deleted", clusterMonitoringConfigName) + } + } + + foundHubAmAccessorSecret := &corev1.Secret{} + err = c.Get(ctx, types.NamespacedName{Name: hubAmAccessorSecretName, + Namespace: promNamespace}, foundHubAmAccessorSecret) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("the secret %s should be deleted", hubAmAccessorSecretName) + } + + foundHubAmRouterCASecret := &corev1.Secret{} + err = c.Get(ctx, 
types.NamespacedName{Name: hubAmRouterCASecretName, + Namespace: promNamespace}, foundHubAmRouterCASecret) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("the secret %s should be deleted", hubAmRouterCASecretName) + } + + err = revertClusterMonitoringConfig(ctx, c, false) + if err != nil { + t.Fatalf("Run into error when try to revert cluster-monitoring-config configmap twice: (%v)", err) + } +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource.go b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource.go new file mode 100644 index 000000000..c74a5d667 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource.go @@ -0,0 +1,196 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. +package observabilityendpoint + +import ( + "context" + "os" + "reflect" + + ocinfrav1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + clusterRoleBindingName = "metrics-collector-view" + caConfigmapName = "metrics-collector-serving-certs-ca-bundle" +) + +var ( + serviceAccountName = os.Getenv("SERVICE_ACCOUNT") +) + +func deleteMonitoringClusterRoleBinding(ctx context.Context, client client.Client) error { + rb := &rbacv1.ClusterRoleBinding{} + err := client.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName, + Namespace: ""}, rb) + if err != nil { + if errors.IsNotFound(err) { + log.Info("clusterrolebinding already deleted") + return nil + } + log.Error(err, "Failed to check the clusterrolebinding") + return err + } + err = client.Delete(ctx, rb) + if err != nil { + log.Error(err, "Error deleting clusterrolebinding") + return err + } + log.Info("clusterrolebinding deleted") + return nil +} + +func createMonitoringClusterRoleBinding(ctx context.Context, client client.Client) error { + rb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleBindingName, + Annotations: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "cluster-monitoring-view", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccountName, + Namespace: namespace, + }, + }, + } + + found := &rbacv1.ClusterRoleBinding{} + err := client.Get(ctx, types.NamespacedName{Name: clusterRoleBindingName, + Namespace: ""}, found) + if err != nil { + if errors.IsNotFound(err) { + err = client.Create(ctx, rb) + if err == nil { + log.Info("clusterrolebinding created") + } else { + log.Error(err, "Failed to create the clusterrolebinding") + } + return err + } + log.Error(err, "Failed to check the clusterrolebinding") + return err + } + + if reflect.DeepEqual(rb.RoleRef, found.RoleRef) && reflect.DeepEqual(rb.Subjects, found.Subjects) { + log.Info("The clusterrolebinding already existed") + } else { + rb.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = client.Update(ctx, rb) + if err != nil { + log.Error(err, "Failed to update the clusterrolebinding") + } + } + + return nil +} + +func deleteCAConfigmap(ctx context.Context, client client.Client) error { + cm := &corev1.ConfigMap{} + err := client.Get(ctx, types.NamespacedName{Name: 
caConfigmapName, + Namespace: namespace}, cm) + if err != nil { + if errors.IsNotFound(err) { + log.Info("configmap already deleted") + return nil + } + log.Error(err, "Failed to check the configmap") + return err + } + err = client.Delete(ctx, cm) + if err != nil { + log.Error(err, "Error deleting configmap") + return err + } + log.Info("configmap deleted") + return nil +} + +func createCAConfigmap(ctx context.Context, client client.Client) error { + cm := &corev1.ConfigMap{} + err := client.Get(ctx, types.NamespacedName{Name: caConfigmapName, + Namespace: namespace}, cm) + if err != nil { + if errors.IsNotFound(err) { + cm := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: caConfigmapName, + Namespace: namespace, + Annotations: map[string]string{ + ownerLabelKey: ownerLabelValue, + "service.alpha.openshift.io/inject-cabundle": "true", + }, + }, + Data: map[string]string{"service-ca.crt": ""}, + } + err = client.Create(ctx, cm) + if err == nil { + log.Info("Configmap created") + } else { + log.Error(err, "Failed to create the configmap") + } + return err + } else { + log.Error(err, "Failed to check the configmap") + return err + } + } else { + log.Info("The configmap already existed") + } + return nil +} + +// getClusterID is used to get the cluster uid +func getClusterID(ctx context.Context, c client.Client) (string, error) { + clusterVersion := &ocinfrav1.ClusterVersion{} + if err := c.Get(ctx, types.NamespacedName{Name: "version"}, clusterVersion); err != nil { + log.Error(err, "Failed to get clusterVersion") + return "", err + } + + return string(clusterVersion.Spec.ClusterID), nil +} + +func isSNO(ctx context.Context, c client.Client) (bool, error) { + infraConfig := &ocinfrav1.Infrastructure{} + if err := c.Get(ctx, types.NamespacedName{Name: "cluster"}, infraConfig); err != nil { + log.Info("No OCP infrastructure found, determine SNO by checking node size") + return isSingleNode(ctx, c) + } + if infraConfig.Status.ControlPlaneTopology == ocinfrav1.SingleReplicaTopologyMode { + return true, nil + } + + return false, nil +} + +func isSingleNode(ctx context.Context, c client.Client) (bool, error) { + nodes := &corev1.NodeList{} + opts := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{"node-role.kubernetes.io/master": ""}), + } + err := c.List(ctx, nodes, opts) + if err != nil { + log.Error(err, "Failed to get node list") + return false, err + } + if len(nodes.Items) == 1 { + return true, nil + } + return false, nil +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource_test.go new file mode 100644 index 000000000..e874a3f62 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_resource_test.go @@ -0,0 +1,107 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
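+
+// This file tests the OCP resources handled by the addon: the metrics-collector-view
+// ClusterRoleBinding, the serving-certs CA bundle ConfigMap, and the cluster ID lookup.
+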
+package observabilityendpoint + +import ( + "context" + "testing" + + ocinfrav1 "github.com/openshift/api/config/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + testClusterID = "kind-cluster-id" +) + +var ( + cv = &ocinfrav1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{Name: "version"}, + Spec: ocinfrav1.ClusterVersionSpec{ + ClusterID: testClusterID, + }, + } + infra = &ocinfrav1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: ocinfrav1.InfrastructureStatus{ + ControlPlaneTopology: ocinfrav1.SingleReplicaTopologyMode, + }, + } +) + +func TestCreateDeleteCAConfigmap(t *testing.T) { + ctx := context.TODO() + c := fake.NewFakeClient() + err := createCAConfigmap(ctx, c) + if err != nil { + t.Fatalf("Failed to create CA configmap: (%v)", err) + } + err = deleteCAConfigmap(ctx, c) + if err != nil { + t.Fatalf("Failed to delete CA configmap: (%v)", err) + } + err = deleteCAConfigmap(ctx, c) + if err != nil { + t.Fatalf("Run into error when try to delete CA configmap twice: (%v)", err) + } +} + +func TestCreateDeleteMonitoringClusterRoleBinding(t *testing.T) { + ctx := context.TODO() + c := fake.NewFakeClient() + err := createMonitoringClusterRoleBinding(ctx, c) + if err != nil { + t.Fatalf("Failed to create clusterrolebinding: (%v)", err) + } + rb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleBindingName, + Annotations: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + ResourceVersion: "1", + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "cluster-monitoring-view-test", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccountName, + Namespace: namespace, + }, + }, + } + err = c.Update(context.TODO(), rb) + if err != nil { + t.Fatalf("Failed to update clusterrolebinding: (%v)", err) + } + err = createMonitoringClusterRoleBinding(ctx, c) + if err != nil { + t.Fatalf("Failed to revert clusterrolebinding: (%v)", err) + } + err = deleteMonitoringClusterRoleBinding(ctx, c) + if err != nil { + t.Fatalf("Failed to delete clusterrolebinding: (%v)", err) + } + err = deleteMonitoringClusterRoleBinding(ctx, c) + if err != nil { + t.Fatalf("Run into error when try to delete delete clusterrolebinding twice: (%v)", err) + } +} + +func TestGetClusterID(t *testing.T) { + ctx := context.TODO() + c := fake.NewFakeClient(cv) + found, err := getClusterID(ctx, c) + if err != nil { + t.Fatalf("Failed to get clusterversion: (%v)", err) + } + if found != testClusterID { + t.Fatalf("Got wrong cluster id" + found) + } +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/predicate_func.go b/operators/endpointmetrics/controllers/observabilityendpoint/predicate_func.go new file mode 100644 index 000000000..80b71d55d --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/predicate_func.go @@ -0,0 +1,73 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
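+
+// This file provides getPred, which builds predicate.Funcs scoped to a single named object and
+// namespace. Create, update, and delete handling can be toggled individually; for Deployments and
+// ObservabilityAddons, update events only pass when the pod template spec or the addon spec changes.
+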
+package observabilityendpoint + +import ( + "fmt" + "reflect" + "strings" + + v1 "k8s.io/api/apps/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" +) + +func getPred(name string, namespace string, + create bool, update bool, delete bool) predicate.Funcs { + createFunc := func(e event.CreateEvent) bool { + return false + } + updateFunc := func(e event.UpdateEvent) bool { + return false + } + deleteFunc := func(e event.DeleteEvent) bool { + return false + } + if create { + createFunc = func(e event.CreateEvent) bool { + if e.Object.GetName() == name && e.Object.GetNamespace() == namespace { + return true + } + return false + } + } + if update { + updateFunc = func(e event.UpdateEvent) bool { + if e.ObjectNew.GetName() == name && e.ObjectNew.GetNamespace() == namespace && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { + // also check objectNew string in case Kind is empty + if strings.HasPrefix(fmt.Sprint(e.ObjectNew), "&Deployment") || + e.ObjectNew.GetObjectKind().GroupVersionKind().Kind == "Deployment" { + if !reflect.DeepEqual(e.ObjectNew.(*v1.Deployment).Spec.Template.Spec, + e.ObjectOld.(*v1.Deployment).Spec.Template.Spec) { + return true + } + } else if e.ObjectNew.GetName() == obAddonName || + e.ObjectNew.GetObjectKind().GroupVersionKind().Kind == "ObservabilityAddon" { + if !reflect.DeepEqual(e.ObjectNew.(*oav1beta1.ObservabilityAddon).Spec, + e.ObjectOld.(*oav1beta1.ObservabilityAddon).Spec) { + return true + } + } else { + return true + } + } + return false + } + } + if delete { + deleteFunc = func(e event.DeleteEvent) bool { + if e.Object.GetName() == name && e.Object.GetNamespace() == namespace { + return true + } + return false + } + } + return predicate.Funcs{ + CreateFunc: createFunc, + UpdateFunc: updateFunc, + DeleteFunc: deleteFunc, + } +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/predicate_func_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/predicate_func_test.go new file mode 100644 index 000000000..f413b2422 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/predicate_func_test.go @@ -0,0 +1,161 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
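+
+// Table-driven tests for getPred covering create, update, and delete events for both
+// namespaced and cluster-scoped objects.
+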
+package observabilityendpoint + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" +) + +func TestPredFunc(t *testing.T) { + name := "test-obj" + caseList := []struct { + caseName string + namespace string + create bool + update bool + delete bool + expectedCreate bool + expectedUpdate bool + expectedDelete bool + }{ + { + caseName: "All false", + namespace: testNamespace, + create: false, + update: false, + delete: false, + expectedCreate: false, + expectedUpdate: false, + expectedDelete: false, + }, + { + caseName: "All true", + namespace: testNamespace, + create: true, + update: true, + delete: true, + expectedCreate: true, + expectedUpdate: true, + expectedDelete: true, + }, + { + caseName: "All true for cluster scope obj", + namespace: "", + create: true, + update: true, + delete: true, + expectedCreate: true, + expectedUpdate: true, + expectedDelete: true, + }, + } + + for _, c := range caseList { + t.Run(c.caseName, func(t *testing.T) { + pred := getPred(name, c.namespace, c.create, c.update, c.delete) + ce := event.CreateEvent{ + Object: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(2), + }, + }, + } + if c.expectedCreate { + if !pred.CreateFunc(ce) { + t.Fatalf("pre func return false on applied createevent in case: (%v)", c.caseName) + } + ce.Object.SetName(name + "test") + if pred.CreateFunc(ce) { + t.Fatalf("pre func return true on different obj name in case: (%v)", c.caseName) + } + } else { + if pred.CreateFunc(ce) { + t.Fatalf("pre func return true on non-applied createevent in case: (%v)", c.caseName) + } + } + + ue := event.UpdateEvent{ + ObjectNew: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.namespace, + ResourceVersion: "2", + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + ServiceAccountName: "sa1", + }, + }, + }, + }, + ObjectOld: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.namespace, + ResourceVersion: "1", + }, + Spec: appsv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + ServiceAccountName: "sa2", + }, + }, + }, + }, + } + if c.expectedUpdate { + if !pred.UpdateFunc(ue) { + t.Fatalf("pre func return false on applied update event in case: (%v)", c.caseName) + } + ue.ObjectNew.SetResourceVersion("1") + if pred.UpdateFunc(ue) { + t.Fatalf("pre func return true on same resource version in case: (%v)", c.caseName) + } + ue.ObjectNew.SetResourceVersion("2") + ue.ObjectNew.(*appsv1.Deployment).Spec.Template.Spec.ServiceAccountName = "sa2" + if pred.UpdateFunc(ue) { + t.Fatalf("pre func return true on same deployment spec in case: (%v)", c.caseName) + } + } else { + if pred.UpdateFunc(ue) { + t.Fatalf("pre func return true on non-applied updateevent in case: (%v)", c.caseName) + } + } + + de := event.DeleteEvent{ + Object: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: c.namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: int32Ptr(2), + }, + }, + } + if c.expectedDelete { + if !pred.DeleteFunc(de) { + t.Fatalf("pre func return false on applied deleteevent in case: (%v)", c.caseName) + } + de.Object.SetName(name + "test") + if pred.DeleteFunc(de) { + t.Fatalf("pre func return true on different obj name in case: (%v)", c.caseName) + } + } else { + if pred.DeleteFunc(de) { + 
t.Fatalf("pre func return true on deleteevent in case: (%v)", c.caseName)
+				}
+			}
+		})
+	}
+}
diff --git a/operators/endpointmetrics/controllers/status/status_controller.go b/operators/endpointmetrics/controllers/status/status_controller.go
new file mode 100644
index 000000000..0bcb8d84a
--- /dev/null
+++ b/operators/endpointmetrics/controllers/status/status_controller.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2021 Red Hat, Inc.
+// Copyright Contributors to the Open Cluster Management project.
+package status
+
+import (
+	"context"
+	"os"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+
+	oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1"
+)
+
+var (
+	log = ctrl.Log.WithName("controllers").WithName("Status")
+)
+
+const (
+	obAddonName = "observability-addon"
+)
+
+var (
+	namespace    = os.Getenv("WATCH_NAMESPACE")
+	hubNamespace = os.Getenv("HUB_NAMESPACE")
+)
+
+// StatusReconciler reconciles the ObservabilityAddon status object
+type StatusReconciler struct {
+	Client    client.Client
+	Scheme    *runtime.Scheme
+	HubClient client.Client
+}
+
+// Reconcile reads the state of the cluster for an ObservabilityAddon object and makes changes based on the state read
+// and what is in the ObservabilityAddon.Status.
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true; otherwise, upon completion it will remove the work from the queue.
+func (r *StatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	log := log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name)
+	log.Info("Reconciling")
+
+	// Fetch the ObservabilityAddon instance in hub cluster
+	hubObsAddon := &oav1beta1.ObservabilityAddon{}
+	err := r.HubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: hubNamespace}, hubObsAddon)
+	if err != nil {
+		log.Error(err, "Failed to get observabilityaddon in hub cluster", "namespace", hubNamespace)
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the ObservabilityAddon instance in local cluster
+	obsAddon := &oav1beta1.ObservabilityAddon{}
+	err = r.Client.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: namespace}, obsAddon)
+	if err != nil {
+		log.Error(err, "Failed to get observabilityaddon", "namespace", namespace)
+		return ctrl.Result{}, err
+	}
+
+	hubObsAddon.Status = obsAddon.Status
+
+	err = r.HubClient.Status().Update(ctx, hubObsAddon)
+	if err != nil {
+		log.Error(err, "Failed to update status for observabilityaddon in hub cluster", "namespace", hubNamespace)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
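+// It watches ObservabilityAddon resources and triggers a reconcile only when the Status of the
+// addon in the watched namespace changes.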
+func (r *StatusReconciler) SetupWithManager(mgr ctrl.Manager) error { + if os.Getenv("NAMESPACE") != "" { + namespace = os.Getenv("NAMESPACE") + } + + pred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetNamespace() == namespace && + !reflect.DeepEqual(e.ObjectNew.(*oav1beta1.ObservabilityAddon).Status, + e.ObjectOld.(*oav1beta1.ObservabilityAddon).Status) { + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } + + return ctrl.NewControllerManagedBy(mgr). + For(&oav1beta1.ObservabilityAddon{}, builder.WithPredicates(pred)). + Complete(r) +} diff --git a/operators/endpointmetrics/controllers/status/status_controller_test.go b/operators/endpointmetrics/controllers/status/status_controller_test.go new file mode 100644 index 000000000..5686e1371 --- /dev/null +++ b/operators/endpointmetrics/controllers/status/status_controller_test.go @@ -0,0 +1,114 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. +package status + +import ( + "context" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +const ( + name = "observability-addon" + testNamespace = "test-ns" + testHubNamspace = "test-hub-ns" +) + +func newObservabilityAddon(name string, ns string) *oav1beta1.ObservabilityAddon { + return &oav1beta1.ObservabilityAddon{ + ObjectMeta: v1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: oashared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 60, + }, + } +} + +func init() { + s := scheme.Scheme + addonv1alpha1.AddToScheme(s) + oav1beta1.AddToScheme(s) + + namespace = testNamespace + hubNamespace = testHubNamspace +} + +func TestStatusController(t *testing.T) { + + hubClient := fake.NewFakeClient() + c := fake.NewFakeClient() + + r := &StatusReconciler{ + Client: c, + HubClient: hubClient, + } + + // test error in reconcile if missing obervabilityaddon + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "install", + Namespace: testNamespace, + }, + } + ctx := context.TODO() + _, err := r.Reconcile(ctx, req) + if err == nil { + t.Fatalf("reconcile: miss the error for missing obervabilityaddon") + } + + // test status in local pushed to hub + err = hubClient.Create(ctx, newObservabilityAddon(name, testHubNamspace)) + if err != nil { + t.Fatalf("failed to create hub oba to install: (%v)", err) + } + + oba := newObservabilityAddon(name, testNamespace) + oba.Status = oav1beta1.ObservabilityAddonStatus{ + Conditions: []oav1beta1.StatusCondition{ + { + Type: "Deployed", + Status: metav1.ConditionTrue, + Reason: "Deployed", + Message: "Metrics collector deployed", + }, + }, + } + err = c.Create(ctx, oba) + if err != nil { + t.Fatalf("failed to create oba to install: (%v)", err) + } + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "install", + Namespace: testNamespace, + }, + } + _, err = r.Reconcile(ctx, req) + if err 
!= nil {
+		t.Fatalf("Failed to reconcile: (%v)", err)
+	}
+	hubObsAddon := &oav1beta1.ObservabilityAddon{}
+	err = hubClient.Get(ctx, types.NamespacedName{Name: obAddonName, Namespace: testHubNamspace}, hubObsAddon)
+	if err != nil {
+		t.Fatalf("Failed to get oba in hub: (%v)", err)
+	}
+
+	if hubObsAddon.Status.Conditions == nil || len(hubObsAddon.Status.Conditions) != 1 {
+		t.Fatalf("No correct status set in hub observabilityaddon: (%v)", hubObsAddon)
+	} else if hubObsAddon.Status.Conditions[0].Type != "Deployed" {
+		t.Fatalf("Wrong status type: (%v)", hubObsAddon.Status)
+	}
+}
diff --git a/operators/endpointmetrics/doc/API_design.md b/operators/endpointmetrics/doc/API_design.md
new file mode 100644
index 000000000..d74aec2ce
--- /dev/null
+++ b/operators/endpointmetrics/doc/API_design.md
@@ -0,0 +1,68 @@
+## ObservabilityAddon API
+
+### API Design:
+
+The requirement doc is located [here](https://docs.google.com/document/d/1qawBUo8VcdBXuXzZl8sypIug1nLsUEm_5Yy0qENZ-aU)
+
+The ObservabilityAddon CR is namespace scoped and lives in each managed cluster namespace on the hub side when the observability addon is enabled for that managed cluster. The initial instance is created by ACM in the managed cluster namespace as part of the managed cluster import/create process, and users can customize it later. The CR includes two sections: one for spec and the other for status.
+
+The group of this CR is observability.open-cluster-management.io
+
+The version is v1beta1
+
+The kind is ObservabilityAddon
+
+**ObservabilityAddon Spec**: the specification for the metrics collector in one managed cluster
+
+name | description | required | default | schema
+---- | ----------- | -------- | ------- | ------
+enableMetrics | Whether to push metrics to the hub | no | true | bool
+metricsConfigs | Metrics collection configurations | no | n/a | MetricsConfigs
+
+
+**MetricsConfigs Spec**: the specification for metrics collected from the local Prometheus and pushed to the hub server
+
+name | description | required | default | schema
+---- | ----------- | -------- | ------- | ------
+interval | Interval at which the metrics collector pushes metrics to the hub server | no | 1m | string
+
+
+**ObservabilityAddon Status**: the status of the current CR, updated by the metrics collector
+
+name | description | required | default | schema
+---- | ----------- | -------- | ------- | ------
+status | Status contains the different condition statuses for this managed cluster | n/a | [] | []Conditions
+
+**Conditions**
+type | reason | message
+---- | ------ | -------
+Ready | Deployed | Metrics collector deployed and functional
+Disabled | Disabled | enableMetrics is set to False
+NotSupported | NotSupported | Observability is not supported in this cluster
+
+### Samples
+
+Here is a sample ObservabilityAddon CR:
+
+```
+apiVersion: observability.open-cluster-management.io/v1beta1
+kind: ObservabilityAddon
+metadata:
+  name: sample-endpointmonitoring
+spec:
+  enableMetrics: true
+  metricsConfigs:
+    interval: 1m
+status:
+  conditions:
+  - type: Ready
+    status: 'True'
+    lastTransitionTime: '2020-07-23T16:18:46Z'
+    reason: Deployed
+    message: Metrics collector deployed and functional
+  - type: Disabled
+    status: 'True'
+    lastTransitionTime: '2020-07-23T15:18:46Z'
+    reason: Disabled
+    message: enableMetrics is set to False
+```
diff --git a/operators/endpointmetrics/doc/API_design_future.md b/operators/endpointmetrics/doc/API_design_future.md
new file mode 100644
index 000000000..ccd8dcff0
--- /dev/null
+++ b/operators/endpointmetrics/doc/API_design_future.md
@@ -0,0 +1,96 @@
+## ManagedCluster Monitoring API
+
+### API Design:
+
+The requirement doc is located [here](https://docs.google.com/document/d/1qawBUo8VcdBXuXzZl8sypIug1nLsUEm_5Yy0qENZ-aU)
+
+The ObservabilityAddon CR is namespace scoped and lives in each cluster namespace on the hub side when the monitoring feature is enabled for that managed cluster. The hub operator generates the default one in the cluster namespace, and users can customize it later. The CR includes two sections: one for spec and the other for status.
+
+The group of this CR is observability.open-cluster-management.io, the version is v1beta1, and the kind is ObservabilityAddon
+
+**ObservabilityAddon Spec**: describes the specification for the metrics collector in one managed cluster
+
+name | description | required | default | schema
+---- | ----------- | -------- | ------- | ------
+enableMetrics | Whether to push metrics to the hub | yes | true | bool
+metricsConfigs | Metrics collection configurations | yes | n/a | MetricsConfigs
+
+
+**MetricsConfigs Spec**: describes the specification for metrics collected from the local Prometheus and pushed to the hub server
+
+name | description | required | default | schema
+---- | ----------- | -------- | ------- | ------
+metricsSource | The server configuration to get metrics from | no | n/a | MetricsSource
+interval | Interval to collect and push metrics | yes | 1m | string
+allowlistConfigMaps | List of configmap names. Each configmap contains an allowlist of metrics pushed to the hub. It only includes the metrics customized by users; the default metrics are pushed even if this value is empty. | no | n/a | []string
+scrapeTargets | Additional scrape targets added to the local Prometheus to scrape additional metrics. The metrics scraped from the newly added scrape targets will be included in the allowlist of metrics (filter the metrics using {job="SERVICE_MONITOR_NAME"}). | no | n/a | [][ServiceMonitorSpec](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitorspec)
+rules | List of alert rules and recording rules. The metrics defined in the newly added recording rules will be included in the allowlist of metrics.
| no | n/a | [][Rule](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rule + +**MetricsSource Spec**: describe the information to get the metrics + +name | description | required | default | schema +---- | ----------- | -------- | ------- | ------ +serverURL | The server url is to get metrics from | yes | https://prometheus-k8s.openshift-monitoring.svc:9091 | string +tlsConfig | A file containing the CA certificate to use to verify the Prometheus server | no | n/a | *[TLSConfig](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig) + +**ObservabilityAddon Status**: describe the status for current CR. It's updated by the metrics collector + +name | description | required | default | schema +---- | ----------- | -------- | ------- | ------ +conditions | Conditions contains the different condition statuses for this managed cluster | no | [] | []Condtions + +**Condition**: describe the condition status for current CR. + +name | description | required | default | schema +---- | ----------- | -------- | ------- | ------ +lastTransitionTime | Last time the condition transit from one status to another | yes | n/a | Time +status | Status of the condition, one of True, False, Unknown | yes | n/a | string +reason | (brief) reason for the condition's last transition | yes | n/a | string +message | Human readable message indicating details about last transition | yes | n/a | string +type | Type of node condition | yes | n/a | string + + + +### Samples + +Here is a sample ObservabilityAddon CR + +``` +apiVersion: observability.open-cluster-management.io/v1beta1 +kind: ObservabilityAddon +metadata: + name: sample-endpointmonitoring +spec: + enableMetrics: true + metricsConfigs: + interval: 1m + metricsSource: + serverUrl: https://***** + tlsConfig: + ca: local-ca-secret + cert: local-cert-secret + allowlistConfigMaps: + - sample-allowlist + scrapeTargets: + - endpoints: + - interval: 30s + port: web + scheme: https + namespaceSelector: {} + selector: + matchLabels: + alertmanager: test + rules: + - alert: HighRequestLatency + expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 + for: 10m + - record: job:http_inprogress_requests:sum + expr: sum by (job) (http_inprogress_requests) +status: + conditions: + - type: Available + status: 'True' + lastTransitionTime: '2020-07-23T16:18:46Z' + reason: ClientCreated + message: The metrics collector client deployment created +``` diff --git a/operators/endpointmetrics/doc/ObservabilityAddon_Flow_Sprint1.png b/operators/endpointmetrics/doc/ObservabilityAddon_Flow_Sprint1.png new file mode 100644 index 0000000000000000000000000000000000000000..f6999e88198e80f260de401451dc417c3064bb2d GIT binary patch literal 35809 zcmeFZcR1C5_&=^mk&!|&qG;Ir*g40Z$6jR}^Vr)lvz3(wB_$L}l7vKv5JED_-g}Si z@x5R5Uf=8beE<3V@%!s@UGJ+m&dc-leBR@6KkoZ^-)>^n6;2$dKTbqMbV5l{R*Q(} zkPs2kfhLlp@RN&gb@Pdch@W`K8F)DQSlc;R5wQti{(Qy8&ui=K?!hJ?%f`=d?&892 zX=iTZX71?D?PTQvKZ4I4oh|LG?W`>SJj2h+&&$P&;^Gz5;TK>Nkm2Qre}s6s`FT-B zf1WqDv2yxrK_PBlSm2VHInK_>!`a>T@}DQHJj`wWtmeq;>Z=j`<7F+KrqA#Tw>A9xU4tp5De#@X4%!3z1QfE*m?ubsmS?f%-UzPq4< zkExxClDC5go}gwUD#LH_XAviJ$G;ZA!gjg%{wyM*=V56ttmZ6_<>j+7^7fFkFi1pF#(6YYn7O*R{v!RBv4W3Wgh?kd_-+))eKt{&_ zrv|@gYk-%-Yp6Nt8+hxPyC|V>a=sP>tcACgw=N1V;El7vduU@l90~jmjy5<8Z$4j5 z3k4x_1yMm4CzOsMETFCCVXkc`uf(gWY-Op&Yj2?Ah?PMp3p%*-E6ZqFtLvjI)QpT! 
z!h)*m7%L4y9S5|pssZds+d^K=L)1`E6l=kc=2dkvH_&quRCd&tmDe>;;MLGla`!P% z*TSN(N=iE37EU+^H&s4cSs8T$X9HCa&^y{dozF!;mSCi$>I~a(aaSW4sCql0h0IYP zaCrjS$(>-OAmrkX#d>&axbteetIMkK>k!-pGRXfP_Yr>>owGRj-Y z!P80KLEb^YNY};QM^973RajP7&&pE9LC#3U-JD?I>mZL(_7$|zRS?iuHrM2pwGp!Q zany8k9BaFq&3|qAH3`vYG^YVT`=0je(D* zy`X|DudbZ1o}j)18m+91gI|}?^ueL+d=&Kc+`V1w&9%I&&x%!fEAQ?P|;8|M;YQh zym*B@t&}XCW%;c=^bBSAdC@pSBVly|SxXOaD71i;lY*+Qpn;mXlYs+4NDpT&tg1rL z*6~nPapM=&HdjPj>WL_;tGnT}M@T-T>~u#3LmIT9xJS`Bqw8OXQOGrXJ})i>;<2AIpA!)Y&EqB7OH|u zI6DkRz){4`R?$#NS5H;MN#4lE0Hfo{tDq>aVTDz4Hgu4~<8-t%WdsPiymFeB;MDxW z3L>f&GNQbq{OUf4+O=dIE$wwxJUta{ee@hv1r@Yhd6n=o)}F2?2OnQiTd+lWoTewA zlQ+S@Sw+~0QTe!eT_VLZFEFc zc^zPJ2LU-Rf~BLEu%(--uaCABN*%3@V1>GmstSCpfO7XF*ef}B@CoTF>MB~PI4gRf zRV_SF8UzAb(Fo7)p{6V#Z{TgIj4|{!$0#Z2IVxJ(CaM2$|?7B(s(82Cz5&{Lh4*G9zM zM_0><*Fw?P)ks8C1E*rduZg#iwNvnL!Qy=!j6?+CS(J{T8%{t3ujJ_C>WTcoLKNi* zyYdpk%6Q58V!e@&h5Uys{CA}K9k$^6ze2Hq9PPWAU?QS(L`t$UIzB10eSX*4b@!wf zGfE$IR64~omalm34r|;EcvJc;;{5{-d%Rt8M2SiESL0;6Ti(}~LXIXEJ^az$UN*)? zb>Q>~ow6H)v5!auZe{z(WwvvY-oI*EA^lU7FNF6>HCgJ4uN2cl9md*hoI%2+Ia)MT z08Mg$Cc4HVKP63Pcyd7qzO;MQo-7*`^+oiBF!PH}Q&v_#^K2GY)~vK{|7S))H=}05 z(;}jxQX=px3HV($*0;0~^sKCI^e>wfUj9mo`eH(dhhMhK{@-8zUxEFvk^FymjL3-O zZ;Z@u1Wk+vCl^?vM|9X;axLr5@K7auL`k$i_$r?$ufh>67pLu*q*wbwFT3cAer|=V z>{=DJde|cWBtup=cW#V!F4i$2?{BZIsj*-~sTW>1I+JCT*3D{KxObAOixhJrI=V*L zB?Vvlj$geHT_{)nw_DTDM&BPqb$HoXrQpNy(f%DV=Hx^gO@s`ttNYyX9?v^WNaq;kp+^DjXv^96H9MIe(#wxof_`vHv~5 z|4$nDoDtI&qf(bIU`I@D!+gbWU~GhZU)ptiE#>{4C4+U>6nykQI!-<=)MO-B;6Z{W zY0uTC#nh3>C_C}1U#q`Cc~JbW7s`KW?b7L_x5O)YU&HGiWUc#;Cx+tdYo_qb|_m9yX`)a#K=M5?X7*(y$uRUJ~r#v`C`<+25^NM{- z!y-?bcU!fkutoc`gvEua`1amYBrKGE7mn8PWa4Ufu+PRbf399TB*j3)a#(obc+TRA z2e+8)d^D%~cF{&ICE6wB%DWC|lJ^8Qj5i3i*t{L*8YpElF4W85CAcIihPCYG3e4lY%fp z^|g86g<)m@Q`zeLWO^PlD8x4rPeVP59ZjX5)(J);^?(A_jQbg>Y zVS7-y7gchx1Np~COnZ{M`THAE55BTR@0*WA$}D6?MrILbeof|No*4-jSfb_8fCI%* z)CELQ>+|0ZWH}N0@$7k9dCI-vv76Rh$>wrDk9*9oiLE;MBLDl|oK1AofdJ4q&&#jKY0$*^pcAs zcguED>~2@o<$^4;&!4H5%P$wOvH1 zow~@l&*_->g#4io-yw+E;|Ddg4m8TJz=t)Y^B_dSjAZP-#p$^ zK+vb__73d0(#}xvAa0!N5zchW5}d42b$&Zaw^7|$u#QuaV4Ba1uc9{qptHhLW0w767#JNZ$_XhGc@Vf+cO zWdGGJ1-<5>Y>eYkDW4R#P+d8~S@h=D?b;;S*AKKF+2+SJXKAKW372UEFH58(o7=>0_G zbA;5^1_Vf$OkO%0oh z{A{ePK9n;>0pXyorL`NyW?<}>Tqu24=ea8`h8`qF#A*CXnuUW2O;RR4dELLzldJn( z&>2|JCYun|P&@U0&~>@v9kD&);ft%cS<>^pasFW8A3k=RK$FZWb;YXjnPL_)>wXM4 zd`_`)2Q*>f^wI_!I3FqxG_}Gj(15Xo9>mK3Eo+n=LlP{cY5(Te)l*>8$U5cSA80^% zG_Ost=6=e~(gk7kji`9hofCRtxd2ZAUUd0)(E#!d80_S#koolX@cnZ3iUKrL|yGjx)wNMdl7;O!Hmy|Z1(IOQB+5p2J z%*nMMqtRB`ruP;6eCy5W$=1@+ro5Oges$-8^6Mwg#}fVeJ3I=~;+e|EN9PVW54o(d zS02s}y4WVj#OCR0reiWqTQI4j^u21dkz`3&X(P?q2+ zL@h?kh@J?y=)YCs_9!b&P!y;EQ0>_@W5xv;H0lm_HyBfBV;KyoZ98mhTNz1JE>L+kTevMZ}$9 zT_AmUHsr&v>4vr3xN9fiVNkC_zmuVFO8*B>QY9rNgAX2;kCHP)v8yEydRJ^S*2Xc; zR8nQr{6kzoGg}mM@gS%uNqUY@2xtLn^-IxHbYj*d;G2Y}H`v+%wwJ4RzIS}fFfXB< zN-Dq64aWt!AeLSq{fN7Bw1|0&BNzZP1U1y4Pz- zFh#uf=V1Pquj0WX1HRgEFA#fd@4W|_;54sZHpo}7I2Tq~zWQ;pyO9~Z5mAP-5yOYI z<)rvr_g2I3wR{_ECY zBbDu4+E&?i{clA>tPj`c8Nb0FfFNHT-DLV1;;dqmnhapl8uLKpV&9`N3%vL8>*JVM zt44LSUYUPPQ)$(0TZv1>1zq&uVVTz0!E;@EGQpwW=5n2u%k{-RYO%MIF$x@pfY|88 z&?Fl2t92&9^#wkDLzk`nRXq3RzR}yjn;mm@6WV}?Q8&+9M*U1G*YD2*Bvxyu|NMMb z25l_?l&Pxhv^&7_2)O^t#r>&ISR8*+*)8*928lC;TLIr_d4z+gTrc~*s>R985!$f@ zViu>td{iGuF>x7fHq_a}@c=I4I;e8qh*{j^J8I@KcBqGt-=;~RG%aS z|EfmEj@>>8E50bmN(a(3tKJ{Z&C2-+L0tv9R?Rbr38=|>I<}% zS-Gq3`Dj5b7z?Lw6wdvgcjKMi{#*YJT;mfL@J%`~FWd0B7o6<|MfVNK+2P!n^6AZ1 z8)n22yqCFw02U62>6Cq+I_cBKEE zP({0?S=)RkXZZ3ne(OZ3Zz>rFGv-8U| z)b`VMt;AQIw)o@gu`3+LwNpd0n8`(X;+%ccyiAkj-B3=JD!1EVl<4x|+qX~i?6-hD 
zB2hk4gApvaetD1Yd5_CEoOBo^oq%UdnIn)y(dHQumn7qE&fphiYdULELW!w`-}5?= zkWRufI^~eq*2}abW%ycD+-p4BUss7F7}%xEL^`H@Pto#v)4MH`auD+7%I(`{;wyXz zM!O75g6tB4?Gkh^0pT_`H?yyo3Wrf{>@EbwpExT#I9wCGyhkCmL6nx3rl_Lg{u!$i z@{c7$8WOKnyTkdJ>1INnI`}SYyh#x!nz=X5>T65YlY4zXAHKtwlFJSda~$%YUvMKZHO^VbcD2Y56;ssqz(Yjf^>d zT3$)py9g@(JkR&@5&im4>B!TuZBv)#vB%_&-R}sK?=Nw4C+-JMzsUMl z9^m)NsPN^>$W*ugCegav7WtV&r~n^EaraXQfFw!|kvpsC#ZZ2DuZB6I+z$@RFeL7ew5lQpn zKgETYuF*BqwUGV4T}N?oa?0ZIFL!*#8t#*jlh@|s>Snjfgzo>NKr*3M^rZ%`xS<;H zdYTDQKgo#`a?vzgZ_KNY61MwFy>ki~o0b1lb4awNiC(LM<=sY6T#T0eJ|idIIrh+` z25!Chp7r9t8~HwJk$(lU*3uNa6eg4l5jDH&RVSy{WsWURB*`5#e`){cfX_c$EGZ!+ zj+B5Duet8ih4w$>D*|^J`tITXTL@9cm%n_ z{eb^gS+sofZAqbep|8e}b>zsA2SWBVJ3BkCOg^<%6{7yp@W0iMoXq&PkdT9$3oX+7 z41s}x$S=G$t(dRWla%_0i_qc!Q$)ch+`AX1beG+{EmLi|Xmr%F7xomK`}XmF?Irw=*Rkr4#fG{I?cSN4V-2S_U0qyw$r(_C?~mTc{6`D_ zsr*RAA^UagDXQ7u9oVn$xfLDF_>=V?TKj)(cHT|DPW6$_SB9bQ-@m`&=l?&8cKVXI_q^=(+T2pRw#LxvRGY?*&;Ot^s93eP9w#MT z+Fc)Lj5qb069P{@FDkm3!1VvvfEZ#iVG$9-V@alo#eE+<8>gQYF8|kV{;kfnC7b z`>dGve2iv>JPQX$W2@SQ6CB*!wO}?em`HN-sqfDx2P&jZe1?ul{A$8xkVv~`+C(Y!-B<@puM&$KYKx5RB8pJ%}~kI4Nc2TuKE0VFg|D}W9%%?46sWy zoXT})!&N~fL6B2i{4Dr!gnAa{v6$zpS~K`}vHoHz>ch=v?<1QOYHDhh2h@@UgHuzP zt(%f{uQ$Ct``5a)2lh5wf*QgtVY#8<;gE#$3Su7&pWPT9ws>V)!4$OBmiqL^OfI^q zO+$Kaa-6EG?2Cr7a@8}XyQ6hK@=O&xJOty^n{{T z#s2OB)tNI>lk5p0JG;9}k?Dk`KACv_dE)pgC8ja=cO((EwH zcD=pc??+LwJLkMKThKDxaG!(dhIw_w(BAGgxzicDzTNHhrR7GR^A|42mK#;;+kG+A zsqyz%oz&c1U=;T<+u2w`xeQiPOTU(M*<)jUNlzw3?zX=d04jPkup{4(_>WN`o`sFA zCYb9yS5HNdG`{0ycv#qBqRK{|ikN}DwPOGK5h?PzEeg_m8|TNrJ`62#>|ur1_3y2; zNb66nNO1oxz2HoZRB{oKV*X+t)AHT}xh%PlgdOPtacn$$-n))0_6+QG%5bq>X?(KM z%2Tn!x-aRwYWvQ%^&kri%f?b2v*Xs*_WD9bc6PRM&*#mgsBAoMJE{Z_KwK^5bq5t}vhzM;gy%3aK;K)SS<&}}Csbis^ZhQ(Q z#(j>W+E{JVFz2P?wflYnwhWA5;$~j(=#Gl4ju70M31+7M{a(+R&NOf*B4u<8C7S6* zYd%iFf2oG{5+`TaC$mc6ObK`Hgpf0enZYqHUAj~W9%RN#cRMjL;?*lo5|%f%t*oDG zYpdGYr~|fEgX7~VfN}k(*b4_!d3wFn%eJQ|7X!t7*-kk5)2&}KxRvpyIH#kcEKVXB z$;iPQlX#%Ova{pAI@d!hB_%ca{W&S%AWPU)FE3%I-uEm6w6moP3!> z%kZ8iMbamhQp>BWpV#JkhUVwvCMRu4SX?H*u@D%)f|B_>r*$^R>i~JiB9d?1AUb^X zXd(g8J0024O;-yG)-Xz-6*cK{(W7%yQ&YmsXPW-9s&+H@KF;Z&=m95@^IU{Go3_30 zT}entZsg=}fCm$UYG6J8jiE@Q8`;^HNLX$sC*J}T;52;utQSjt@??m}fWNkslbaiV zdU|>T!zr_4LKH?HeT6*47!k}Cuxb8@+`6oCM9|05N%-(;D7srDj#%W-7e7kql&JSV zb@nWj91k8mNJJE{y$0x@si{eHqr`opshCvPuRl3;^nKqUk(02jQDeVyN-fL zc!TCvUZpM~*JzOI)>Kw16bJ6GsSpw6=;eZKia7NW5si+GAtr`k76=1$2hS{zH6|n_ zfmDbzG&F#0P#ii+_D1#g;`(|Wcqfx+yM!$miqGz^ADTIOTqTmRe}Px)gZmu%;JiY5 z*4FG`4};Uww~7Y>&)PJn$hf-l`3D5#`~JfF5C|+6FA^gp5H#?~>}n)AVg{&y;8Ia_$bu7aOZojUcIt43B<<3^f%b$UhyV_z8kNb_THUOF;5l*>7CCJBCAzYs<1|3hd%_AkY_T;BL;Sg>roLjezl_i}K4Q^iEdYrTpT*&mCr^HR zu0or0H}Oi*c_pV%G@r5NQnq?7;P z0UZeo7K{A!K!z5{bq0X*a7n*49rv85C_EF`UvyMd zWfawUhCZ^aVuvn%A79_ZlD?C8^(PX?h)5_XlD216e$blwo3FU$2L}g_Ny+Q!>6JK+ zcnN6T#II;t?Miw~-tNemr=p^Yii?wXbN$Oi$PAbQ2+=_}N}s$30&HM??JV{1yu7@fE#| zI!gJr?=gwG9p2m9+h}qQ4Bgo95ih(Il|62rtr_#^(OF^V{@F=u)MLn^-hcRD0rUYu zxNm8g){3Bg{PX590m=(5t$_@ln~eX4#t9 zt5=E2ca~KYM5?D=8RVm#j;#Fp6<%+7CyiZP+&Iw0z~DhgQNCe;^?^6L?+u9P#Jyha z_yU}Om)F-#fJ|%JUug5;YRUe2bv(Sf5nc^;%FOZfv2+lV`}kKG2q8;z#RCV40FEN< zf4tUJ62BRh!ib4Y=jZ2Vnwe2%mU}4&!hH`8^gMMPLh*SH4$^_3{f`}U@ov{q z`Mo9X3jMWUoF~C^pJio@Y%f&QeMvYE(dJy(RSyr35{0xLH_NRwOyuCg0#EAs%a@e` z1EwaHmX}|yhJ8^G{T{H;|B+76_O|~H#Qg^8h*Pw*xZ0RAF5cde5ON;y;as-Y9N~u; z5*Fs5pKr{UeanDgUj!`zEpc&o$3RxMxH*w3y);5aB`Od>UsV}=$Q3BaqaqiHjd6D! 
zPsD`p+`W4Uqy*s~2O5O9o*{Al8?+8hWqfm#*mnVa2jBz zXviayU)F&v(>GTpa9)0J-k)WvV?o`gD}X^)Iq<<4(mXBleC+$DxhX{Y~baTqffphv2NNi@-VL zW9Wh*i#f;5O@1`=0Fd<%IJLjO|Lr?>h98jI6F~@iX;kdQmB&`x-oQO!wYc`6fe4}cd zivcJb)Dpoy&<+lq5|WbnfxEtdv@#^YLAx`iNbE8pjGcxwzcZolbv}^GW)S`}=ZamL z*RNkM)2)Dg@9eC!y!Tm3Cp!+Bdv~0pi0L5{H8q&hxm5esbSO!GZz-^CP3I3DmWg+J zwhdBzRRs>11&4rO`>n{)nA4=ppugB-O3P#RXa3ibLmkX`<mA1*|>UmKDFV(Ihe;3mJFd<{0f6{P(uD z+&FacuP>|A7uCOb&MyjP+q9I{Ean6R1j}O5>0C&qruwNom zQ_~c2Z}RZ)aD;x2x1^dnWkZ7a>8(}c?2=>9g~WkfqHh@r6oU4hWKe<{D^I1NN)Ry1 zZlwcsa2P0OgwO&QA3=E#Lhzj+nRjp`z&n$mUC)wd2u=6saiOF`*ZE?GmFiyAGq zilFaNOO<+T{40uSxQ;pKUf$b#dRv!P6-4$_KRN34_pMA^eJd>CDm;P5L;L}j;&qd3fIn_gzswhj<>S%@dyg$IN%b*MNv^Y zpRpNy(z~nFVUz}MEsqhIb-dK2M1LWbbnL5gUFer2qvO5ml=K&2`-sq-jaXKDecz_({g)Q688VUd;|Xl-I)Jz|wZ59G$fOdAj%!m9AvR`1y{l?PpMZ3x}ExSzDGyQt_EsdAK5$ea0R^ zpZ;6nYk#@|=U9Y=X`Gy#7JvOJ^gWMd)q*A<-DoHe&_5D!_VN&TLI+EBCH?NPEX;z^ zAA_iyY+#wsl7+0i{4kg(xx;BHU1+_0DSY6-0imsXf^fpO(!aN0{1~u08lK63t$;

7BsDW|MVVrNPt2wh;^^akJ`ks{L}%n8wJK%_DrVUNObMgO#vdU`?5 zrsw`0c0ff#V_g^!8^aN7|w<7$>4-AMXB@_X|G$+O>|t1J%y_oWkdpex@TKZ|&jGA^4Xi<*imcyMqK zAD|9mbR++5DEb_)ua`!!sU$vipJ>)|-gualG*PKX^&&Pc;vO(9*=D)lN5fx-l&BTl zK=p}4SF0L}7CG$ot$R)R}6CdG2ZkD6q&Hb z*N%;)q@=_?{}cgElXCa>`E*-BJz+rbXc4*(wTrRIaY{QH%!v*<2!j5@b%S04tP`2P z9YEKH|KVsw#om@Z*%|)B;$E{ZX3ftS9p$z_&bldFG<2dI()W#&TNxd3D}u6h0j4H&>nl3bYZltID8Z9(m5cOxQGsCP^ zV=>88l$19>00%)->B-7}ws$MQE{W=szOA6{&=T$Y)3LJ=Y4VyzrGwwK9&ZJO?XN7> zRz$RT`xyItVr$LN{=J`~Es$6(r?SfiYl`kKA-7KldP~=BiFYZxfGEh64ZgKL&GY`k z{Xb35#x-oFpOb$wMDFid=?3K95{(Ubk;EYRSuvCjf2x|4+dgn9MCCjGZkG*xos!MoYp z8Ig2$l$cmnpHI)%ZqZ)5L-WT;jRT4<_YGCx4A*j6grAH1MkO5OYrk}Z_8Pv}=hIkq z`25BVT{YeNzlE2VdvJo$Wn)17oi)L=1dmDmg-6PFi#!$*=R>af*VZR+e!Q)7sPrF8f&OmNOhQtm?Ivccsc zIy}^v4t8wWLy5$Z7I5WKAViWEc6v9osPW9j z$Ul|7-ZV$EZ1e0F$8=`$a#7B9r75oc>B;eF@&{5F-d%4-iOMP{WB`1TI|7_w)YP6D z1v%CbRC>l@R%kl>$%Kv%jy|39?X71K5TJ%z7vN_#Jf{yiPgYy&9gl$Dj; zzI!(m>RpEqAFhNV3>2;vHTFGAAwBCsids-zpWIY#!G)kxhus zW`zv`FY}UY9(9>$W`x&}F`y`kDVUJF=I9CfFu21bVL>Rw?c0>Sz*}I$K(-D66QMkH z3O3=?Uv?8RW%xxTjzi%H`BXlhDHg7pz9tKiLS|rRXUF@f2{IY33#K zB*njyb$D1Dm%4pmuWulq(-Km~-J`LKzt((4A4I$I?j8)I{PJP3pgKrHzw=4qP-&XC zz`43>G5L3`2b-QO49m`Q!3Debg|oHMa1FMvlJe!FRh#>X9T(t4QN5n;s#@Ag-dk*! zzvC}nh-tY&S@mL^E4=~HG+5Iw)Lo3KDp?pMFkGlgjgPZlx}+G943!chA}wvCB?Gds z14Qc!1CWP8rQ|SFAA`SqQ6#EFQs2tTs~ek}2j27t3J2}^Ls1oi)*X>!P0h_fZlTho z!TXZqU;ES*c7EK`8eX#lig(EQeVw;@h*ackcw87#g_1 z0te)iNn2dlBY&H>7ZPi{J_6^wkh(>VCb?jJ?d2*#HEH4M#PIRQ(H#BE!sS&hR@p3E zP{Sa?#1_rCvPW7qCVpybEBHE>h;?7KtoIuTi;OhhJ>9Nl>c4bkzW-x1q$gESfkKnO zwv*1{4<0(y7!dR7)vG$@cHw^&bLCNSXT$=WhVR%zy<+1k^&}_fdP*u3H=VD@p(pE$ zpNDmgM%)#<3mOb2nDdSEo*ur-gt z%^_S?`h@txr;2TH+u^xr=acGR>inF)*s31iBoiIChLd)bWGrE1Leb^DF|^5BPj43IkwwA= z;EkUrZ?a??w=lvf`_J;zKb#0ccZR37e(s@VZZQs)_VY~(43&pPOZxtAW2y$$0%dAV zO)4Vfb`7#lNX{S?B)ZX8szO65Tge}@f9%!p!s z{Ll~FrI6ma0o45z4UJwCQ4j>Zh~(s}fPydI5@rTIr9OonjP;9neJ9)LYEy}bV(%|E z_#k||Z(y2&f z-ioDHt=;Pnv_2*TIieqVYSr``11SDmdLGtuhysh5vQg>R@f!`BvhAXuKkimdoou`fRfL?_sCDB5acM&RKRZwT)GA=qyG}~K3 z4d?~gB|jb8g$sjlH$ZaiSi*W2kioxjUbm}E$YT@0UjPr^Y>_PT~{w;GXVuPs;0_Rpy5sVIJ!9vR^s{riQpW z_(-kVCLNF9FBw=yrBfEYU|sp54E>quEBa?#DC)j`)S=50-@p>j*J|Xvk-Q(C{Jp1z zv!^lfRoqqh4a9R+yHd{sfV8*$$bJ3XZUukw@G5~B40~48|2Ue2LFgf9yte55sWW)Q z9V>i=?hjPk70@(b+hE!1TGfR0rj$|!&zX|OwcO}l#BIN~^nPpr;q7|$Jyr@dEZ z*o{LkIv`G3(LW6a|DlNVZR6ei^+~&&9vdEm%M_~F15XniFyou4 zi%Vsvefq1lrPe_YXYl>Ao7`Ju4BQJnC*2hz=)pV_U&r2A&+U6lj3&tmCF5Mb-`{K< z|KIppKMIBT8#(%UdRqkP{T+d~Z{IG$rM_`rAe1OmMBU}frY9Q^fYa{d1A-ol~)Q+oNB)QGjm9~$a59qPr-;1S$zv*x+8 z9ZbF z_rpKyovotiB>WFTU>MDzGo%fyy?18i+lM}Tpgs|^ITeEoT*l`&Mvm|wcIf@U3<9hA za;$BvX6@g0KF0yW^67j}4Sp~tIw>#wf7|&8W85CxY?1H2ZaAC)3;LPl4wE zAdg9M8i76G+$OKViN*e+VXkhMnEyc+tH!eXzy%lwCY=h^X2Ub3etL5fi;kPdV~bK4 z*u!q;p%|JQFnzvI*U|&@RFwL~2%Gb9o}}B{1n4aL*8jV~?)C#$C~?K4imCJ$iE-*V ziRXY-_kJ(B!a1Hc_hL2x)Lye(71)>~6hM4Gh0{*-ag$qePv}6FhO{Y1@wB0d?n(4h z0;x2j@sHAs0BiYMCPfSFx)I5MTZ{U6nakx6c~BN*Ti>uNX4%7KavkA+u$Wt&d<^#w zd1kWeSC;mp?itpyymUG5x!I;(8_TP1V{@t;=0MMk}PjkD-rqIm>V9&f?@p?J#VdaR&)GX^&~rU#?q5fLye=^*pdPi;4)s% zvOUeC=S*g;a)zg`_VtCk9j32zkVCk4V%<^Ox2t<9u_F>lB9KDpck_6jy2%6(T-_;I zPwq@Q!R5vdtGrns#X2@yp&^DCLv(@dJGUErU=@0g>g? 
zT7Vtuv1u3th_NXyXeWF*M!RJCG1+417H^~mNeaUaXnhB5&w=GEp>F>iS}%3&XVfHahVjqS3R2}*q&mM?RpsUEzPc$Jb}n$a6YqvdWJ56BbOYwNie`@` zrLoW8*^xN}?!MA&o_~XD!xUtbzo0vE`n^Wn*=9(p?X`!os1e4We1w z)%0NYG+_4Gaaf$__qQH~2W5zntgU=qaZ6uqP*Z{F^{n9T=h52yHDL+!>5)R0o$N~HGuet#YpF_ZBMxX zl1V^>S~vy*pYh5wzdmX*i*f-u9fIInRhzpFS)p0|Gzm~DgqqWdnv2CHe|@mj_4bay zz6}rZgl6#*@rc-mA$RrX8|s~am?}1we5(IM4&8c(XlZFBwr2}`#vj@=aKR^CU2PQNCw={^Z+XNGVH0`)l-#f26BzCn3G%$PihoFgeWq zP?q}%z--GsvKO^VAwF(0%K(+Zc8pZddTP&GSLTZ`pDD{ zGKU2{^@E@9la=Ih!w}u98V3PWj=6aEo3o-cGEoOT#~aFOLYlc1H1~IR#BkYXgsqQ5 z&@(6x6o-_u(kK6FxzgC^w-TOj$y9&$0(5+ju$n27^x%G_^nAKZZmkBJabx;q%*A)2A>k@QQ3-D z`Z;bZ3Ybo-g~2$>Y(tpx0{^MEi3e)Vc(YaSNBgU|Yh3%1d>*XGpdC!IfkAk&&%spJ zv4=}OHkCcdgadSVn?qOl*4zxt)>#zpIyAo(eVYq*0>eq>RjW%ITaRJ^zTp`(ZlM;2 zd!lA-zVuQeJ@paE@wCnFx&znr_K9Gc0LFkC=WIHlLY}DC+LBigo_uc>={bO-TFsx8 z^11@rLcsg!RI0`24LjR`7aSn7otfM~w?OfGgd>kj*FyT6h$@8V#_Fvg+I~i+6(04(l3Qstgocz^Up)J|H;*PkHw(z+oBSE$IfBoVpjSnrj@WqZ!x%9E;`R)lKa#!>7Cqhh}kJcao4 zFd}1Y2>A#YNm%|}n+6eSzc*gBHunfglVR^xfRV@h7s^cx8#!gSLV0GY`_iVTeerBC zC3=lz%}lRQm7GhA{8F*=OC9Kv%$0f#O>_N0`@0I{LYGVGmv!TqK9=Gpf2J7}T0JKd z0;7iJ_zM5~n2Gd)<3ccxG>SXULRrGIoR*fkUfDmkW@WsiH>)xbjf@LK-I?)+@e8uM zU0cb?VPEg)`&L<0KZSXl@sKk<&)YcYq#YSxREGR+7bMN-+P!juxHofDqB$oBZnmA0=T7k%7;oMtnCC2Ao7XuAMDE zix@qyQWwBd@Qa}znUatx*50O~hH}C5)Xau&&jDzcEQ`Eo-F%&V;v+tmy5~^?Z8YBC#6BT)LS?oZqN--qX2bg0*z!H#* z1KhT3exRTiyP}mNyxjNH9V#%BcWZ3><_k?RYUv4gLmx|SB=sX#P0&<-8B%p-QRL4F zcuaR6gE{YD7;Jj4{E$?^7g-Y*wcQN2$>@EamR}x@^6$WVj z8L65=71cP)!JPP7X07>h-Q9DtN<@Ukr#D~>BwhQ5I?RBRT95}Fg3&}|%&#S}B^aV@ zv|jlGe!pALGo}$T^rKlLBAM}KXBs=qg?UW%AAwONh@T;O?K;#QTgFCm(iP1#_`i!A z&WR;F4s|jln#+-;-dhwh5R?p?;=%Gmv-|2AzG^^Zk}^HR4Q>#p3f!C~uNzYP`bS=C zXHeIevbBA;yKX1Q=mnjMQRCd~qfSjWwk8fKxZkis(?tO#Y6eC;=a5D3y?CSqiz5t%*JBoIc}*&=I*S}I z7lvbC;_K!P1B`dUAbrw~+=lcgx3kFrw}wkVbYM6u44GY;G2rCqW>i>7Y~thd7{*|l zV5Id8Pe5J=j=%k9Dt@;xdv@YS7YvHUyV_TKCzUuu(SU@-3~J&FtIQ$m5pfCYh9*M~ zD`schOkW(|Y&q%waqdP31HR1QT;9Ot&45u8n3U`-OzJq6ktn?^Hz%cMZQ@>}<=M}= zZIP{cL!eBf3CSXPTpK%If6r?N_DGp-S^6V$GR!UYB4piS<*j{qG*aqaV{C+4jzL5+ z6dAwNB~wZm2k!b|>b~T)m7ZDDm1O{4LwusFYvuYTA*b46tf*k?1buz3eb4&ydT&>g z_pD@)#}DS2sEy%-!2_vAbkD%3A$QbWOve_qv)_PDo_mI5h7t|i2x|dYi^AE&L5m*@ z*EVn&C$8T&q$)xqiC^V)CzkE4M|q7WgfzEvF-^(YLNMHRk#?QOs-n5;CdGCek&%hWs@I|Fs8A@u|SWAuPQS z5mGNIc^REHUJo}`(G7utkFENzT!%Cg*e82u%*Hgidn zLrsKjNn_>PyOqmF+u<+Oedfsf7z{X%IdS>Q#*K>*=)RD@zF2-<=f_U z;=Su?k5fD}nkHx4+*+r}1PV0%PkV13j@A0U4?h*9M9D5u2vJCqkTMi1CDDK~7p0^O zl`%9L5|X3{i6ldkOc@?S$dn2hBN`*? 
+ - name: kube-state-metrics
+ rules:
+ - alert: KubeStateMetricsListErrors
+ annotations:
+ description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all.
+ summary: kube-state-metrics is experiencing errors in list operations.
+ expr: |
+ (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m]))
+ /
+ sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m])))
+ > 0.01
+ for: 15m
+ labels:
+ severity: critical
+ - alert: KubeStateMetricsWatchErrors
+ annotations:
+ description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all.
+ summary: kube-state-metrics is experiencing errors in watch operations.
+ expr: |
+ (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m]))
+ /
+ sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m])))
+ > 0.01
+ for: 15m
+ labels:
+ severity: critical
+ - name: kubernetes-apps
+ rules:
+ - alert: KubePodCrashLooping
+ annotations:
+ description: Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is restarting {{ printf "%.2f" $value }} times / 10 minutes.
+ summary: Pod is crash looping.
+ expr: |
+ rate(kube_pod_container_status_restarts_total{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[10m]) * 60 * 5 > 0
+ for: 15m
+ labels:
+ severity: warning
+ - alert: KubePodNotReady
+ annotations:
+ description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 15 minutes.
+ summary: Pod has been in a non-ready state for more than 15 minutes.
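+ # The expression below joins pod phase with kube_pod_owner so that pods owned by Jobs are
+ # excluded from this alert. A simplified ad-hoc preview (interactive query only, without the
+ # Job-owner exclusion) could be:
+ #   kube_pod_status_phase{namespace=~"(kube-.*|default|logging)", phase=~"Pending|Unknown"} == 1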
+ expr: | + sum by (namespace, pod) ( + max by(namespace, pod) ( + kube_pod_status_phase{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", phase=~"Pending|Unknown"} + ) * on(namespace, pod) group_left(owner_kind) topk by(namespace, pod) ( + 1, max by(namespace, pod, owner_kind) (kube_pod_owner{owner_kind!="Job"}) + ) + ) > 0 + for: 15m + labels: + severity: warning + - alert: KubeDeploymentGenerationMismatch + annotations: + description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back. + summary: Deployment generation mismatch due to possible roll-back + expr: | + kube_deployment_status_observed_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_deployment_metadata_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetReplicasMismatch + annotations: + description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes. + summary: Deployment has not matched the expected number of replicas. + expr: | + ( + kube_statefulset_status_replicas_ready{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_statefulset_status_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) and ( + changes(kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[10m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetGenerationMismatch + annotations: + description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back. + summary: StatefulSet generation mismatch due to possible roll-back + expr: | + kube_statefulset_status_observed_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_statefulset_metadata_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetUpdateNotRolledOut + annotations: + description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out. + summary: StatefulSet update has not been rolled out. + expr: | + ( + max without (revision) ( + kube_statefulset_status_current_revision{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + unless + kube_statefulset_status_update_revision{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) + * + ( + kube_statefulset_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) + ) and ( + changes(kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeDaemonSetRolloutStuck + annotations: + description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 30 minutes. + summary: DaemonSet rollout is stuck. 
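+ # The expression below ORs several mismatch checks (current vs. desired scheduled, misscheduled,
+ # updated vs. desired, available vs. desired) and only fires when the updated pod count has not
+ # changed in 5 minutes and the whole condition has held for 30 minutes, i.e. the rollout is
+ # stalled rather than merely in progress. A simplified ad-hoc check (interactive query only):
+ #   kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_number_available > 0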
+ expr: | + ( + ( + kube_daemonset_status_current_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) or ( + kube_daemonset_status_number_misscheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + 0 + ) or ( + kube_daemonset_updated_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) or ( + kube_daemonset_status_number_available{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) + ) and ( + changes(kube_daemonset_updated_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[5m]) + == + 0 + ) + for: 30m + labels: + severity: warning + - alert: KubeContainerWaiting + annotations: + description: Pod {{ $labels.namespace }}/{{ $labels.pod }} container {{ $labels.container}} has been in waiting state for longer than 1 hour. + summary: Pod container waiting longer than 1 hour + expr: | + sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) > 0 + for: 1h + labels: + severity: warning + - alert: KubeDaemonSetNotScheduled + annotations: + description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.' + summary: DaemonSet pods are not scheduled. + expr: | + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + - + kube_daemonset_status_current_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 10m + labels: + severity: warning + - alert: KubeDaemonSetMisScheduled + annotations: + description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run.' + summary: DaemonSet pods are misscheduled. + expr: | + kube_daemonset_status_number_misscheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 15m + labels: + severity: warning + - alert: KubeJobCompletion + annotations: + description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than 12 hours to complete. + summary: Job did not complete in time + expr: | + kube_job_spec_completions{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - kube_job_status_succeeded{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 12h + labels: + severity: warning + - alert: KubeJobFailed + annotations: + description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. Removing failed job after investigation should clear this alert. + summary: Job failed to complete. + expr: | + kube_job_failed{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 15m + labels: + severity: warning + - alert: KubeHpaReplicasMismatch + annotations: + description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has not matched the desired number of replicas for longer than 15 minutes. + summary: HPA has not matched descired number of replicas. 
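+ # The expression below fires only while the HPA's current replica count differs from the desired
+ # count, sits strictly between the configured min and max (so the mismatch is not explained by a
+ # limit), and has not changed for 15 minutes. For example, desired=5 and current=3 with min=2 and
+ # max=10 matches; desired=12 and current=10 with max=10 does not (KubeHpaMaxedOut covers that case).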
+ expr: | + (kube_hpa_status_desired_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) + and + (kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + > + kube_hpa_spec_min_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) + and + (kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + < + kube_hpa_spec_max_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) + and + changes(kube_hpa_status_current_replicas[15m]) == 0 + for: 15m + labels: + severity: warning + - alert: KubeHpaMaxedOut + annotations: + description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has been running at max replicas for longer than 15 minutes. + summary: HPA is running at max replicas + expr: | + kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + == + kube_hpa_spec_max_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + for: 15m + labels: + severity: warning + - name: kubernetes-resources + rules: + - alert: KubeCPUOvercommit + annotations: + description: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. + summary: Cluster has overcommitted CPU resource requests. + expr: | + sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) + / + sum(kube_node_status_allocatable{resource="cpu"}) + > + (count(kube_node_status_allocatable{resource="cpu"}) -1) / count(kube_node_status_allocatable{resource="cpu"}) + for: 5m + labels: + severity: warning + - alert: KubeMemoryOvercommit + annotations: + description: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. + summary: Cluster has overcommitted memory resource requests. + expr: | + sum(namespace_memory:kube_pod_container_resource_requests_bytes:sum{}) + / + sum(kube_node_status_allocatable{resource="memory"}) + > + (count(kube_node_status_allocatable{resource="memory"})-1) + / + count(kube_node_status_allocatable{resource="memory"}) + for: 5m + labels: + severity: warning + - alert: KubeCPUQuotaOvercommit + annotations: + description: Cluster has overcommitted CPU resource requests for Namespaces. + summary: Cluster has overcommitted CPU resource requests. + expr: "sum(kube_resourcequota{namespace=~\"(kube-.*|default|logging)\",job=\"kube-state-metrics\", type=\"hard\", resource=\"cpu\"})\n /\nsum(kube_node_status_allocatable{resource=\"cpu\"}) \n > 1.5\n" + for: 5m + labels: + severity: warning + - alert: KubeMemoryQuotaOvercommit + annotations: + description: Cluster has overcommitted memory resource requests for Namespaces. + summary: Cluster has overcommitted memory resource requests. + expr: | + sum(kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard", resource="memory"}) + / + sum(kube_node_status_allocatable{resource="memory",job="kube-state-metrics"}) + > 1.5 + for: 5m + labels: + severity: warning + - alert: KubeQuotaAlmostFull + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. + summary: Namespace quota is going to be full. 
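+ # The expression below divides the "used" quota series by the matching "hard" series, using
+ # ignoring(instance, job, type) so the two sides join on namespace, quota name and resource.
+ # Worked example: 9.2 CPU requested against a hard quota of 10 gives 0.92, which falls in the
+ # 0.9-1 band and raises this informational alert after 15 minutes.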
+ expr: | + kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) + > 0.9 < 1 + for: 15m + labels: + severity: info + - alert: KubeQuotaFullyUsed + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. + summary: Namespace quota is fully used. + expr: | + kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) + == 1 + for: 15m + labels: + severity: info + - alert: KubeQuotaExceeded + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. + summary: Namespace quota has exceeded the limits. + expr: | + kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) + > 1 + for: 15m + labels: + severity: warning + - name: kubernetes-storage + rules: + # - alert: KubePersistentVolumeFillingUp + # annotations: + # description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free. + # summary: PersistentVolume is filling up. + # expr: | + # kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # / + # kubelet_volume_stats_capacity_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # < 0.03 + # for: 1m + # labels: + # severity: critical + # - alert: KubePersistentVolumeFillingUp + # annotations: + # description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available. + # summary: PersistentVolume is filling up. + # expr: | + # ( + # kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # / + # kubelet_volume_stats_capacity_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # ) < 0.15 + # and + # predict_linear(kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 + # for: 1h + # labels: + # severity: warning + - alert: KubePersistentVolumeErrors + annotations: + description: The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase }}. + summary: PersistentVolume is having issues with provisioning. + expr: | + kube_persistentvolume_status_phase{phase=~"Failed|Pending",namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 5m + labels: + severity: critical + - name: kubernetes-system + rules: + - alert: KubeClientErrors + annotations: + description: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.' + summary: Kubernetes API server client is experiencing errors. 
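+ # The 0.01 threshold in the expression below corresponds to roughly 1% of REST client requests
+ # returning 5xx. Worked example: 2 failing requests/s out of 150 requests/s is a ratio of about
+ # 0.013, so the alert fires once that rate has persisted for 15 minutes.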
+ expr: | + (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) + / + sum(rate(rest_client_requests_total[5m])) by (instance, job)) + > 0.01 + for: 15m + labels: + severity: warning + - name: kube-apiserver-slos + rules: + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate1h) > (14.40 * 0.01000) + and + sum(apiserver_request:burnrate5m) > (14.40 * 0.01000) + for: 2m + labels: + long: 1h + severity: critical + short: 5m + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate6h) > (6.00 * 0.01000) + and + sum(apiserver_request:burnrate30m) > (6.00 * 0.01000) + for: 15m + labels: + long: 6h + severity: critical + short: 30m + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate1d) > (3.00 * 0.01000) + and + sum(apiserver_request:burnrate2h) > (3.00 * 0.01000) + for: 1h + labels: + long: 1d + severity: warning + short: 2h + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate3d) > (1.00 * 0.01000) + and + sum(apiserver_request:burnrate6h) > (1.00 * 0.01000) + for: 3h + labels: + long: 3d + severity: warning + short: 6h + - name: kubernetes-system-apiserver + rules: + - alert: AggregatedAPIErrors + annotations: + description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported errors. It has appeared unavailable {{ $value | humanize }} times averaged over the past 10m. + summary: An aggregated API has reported errors. + expr: | + sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[10m])) > 4 + labels: + severity: warning + - alert: AggregatedAPIDown + annotations: + description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been only {{ $value | humanize }}% available over the last 10m. + summary: An aggregated API is down. + expr: | + (1 - max by(name, namespace)(avg_over_time(aggregator_unavailable_apiservice[10m]))) * 100 < 85 + for: 15m + labels: + severity: warning + - alert: KubeAPIDown + annotations: + description: KubeAPI has disappeared from Prometheus target discovery. + summary: Target disappeared from Prometheus target discovery. + expr: | + absent(up{job="apiserver"} == 1) + for: 15m + labels: + severity: critical + - alert: KubeAPITerminatedRequests + annotations: + description: The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. + summary: The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. + expr: | + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) / ( sum(rate(apiserver_request_total{job="apiserver"}[10m])) + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) ) > 0.20 + for: 5m + labels: + severity: warning + - name: kubernetes-system-kubelet + rules: + - alert: KubeNodeNotReady + annotations: + description: '{{ $labels.node }} has been unready for more than 15 minutes.' + summary: Node is not ready. 
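+ # When this alert fires, the usual first checks (run with kubectl against the cluster that
+ # reported the metric; <node> is a placeholder) are along the lines of:
+ #   kubectl get nodes
+ #   kubectl describe node <node>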
+ expr: | + kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 + for: 15m + labels: + severity: warning + - alert: KubeNodeUnreachable + annotations: + description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.' + summary: Node is unreachable. + expr: | + (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1 + for: 15m + labels: + severity: warning + - alert: KubeletTooManyPods + annotations: + description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity. + summary: Kubelet is running at capacity. + expr: | + count by(node) ( + (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} == 1) * on(instance,pod,namespace,cluster) group_left(node) topk by(instance,pod,namespace,cluster) (1, kube_pod_info{job="kube-state-metrics"}) + ) + / + max by(node) ( + kube_node_status_capacity{job="kube-state-metrics",resource="pods"} != 1 + ) > 0.95 + for: 15m + labels: + severity: warning + - alert: KubeNodeReadinessFlapping + annotations: + description: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes. + summary: Node readiness status is flapping. + expr: | + sum(changes(kube_node_status_condition{status="true",condition="Ready"}[15m])) by (node) > 2 + for: 15m + labels: + severity: warning + - alert: KubeletPlegDurationHigh + annotations: + description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}. + summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist. + expr: | + node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10 + for: 5m + labels: + severity: warning + # - alert: KubeletPodStartUpLatencyHigh + # annotations: + # description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}. + # summary: Kubelet Pod startup latency is too high. + # expr: | + # histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60 + # for: 15m + # labels: + # severity: warning + - alert: KubeletClientCertificateRenewalErrors + annotations: + description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes). + summary: Kubelet has failed to renew its client certificate. + expr: | + increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: KubeletServerCertificateRenewalErrors + annotations: + description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes). + summary: Kubelet has failed to renew its server certificate. + expr: | + increase(kubelet_server_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning + # - alert: KubeletDown + # annotations: + # description: Kubelet has disappeared from Prometheus target discovery. + # summary: Target disappeared from Prometheus target discovery. 
+ # expr: | + # absent(up{job="kubelet", metrics_path="/metrics"} == 1) + # for: 15m + # labels: + # severity: critical + - name: node-exporter + rules: + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up. + summary: Filesystem is predicted to run out of space within the next 24 hours. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 40 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast. + summary: Filesystem is predicted to run out of space within the next 4 hours. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 15 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. + summary: Filesystem has less than 5% space left. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. + summary: Filesystem has less than 3% space left. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up. + summary: Filesystem is predicted to run out of inodes within the next 24 hours. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 40 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast. + summary: Filesystem is predicted to run out of inodes within the next 4 hours. 
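+ # predict_linear() in the expression below fits a linear regression over the
+ # last 6h of free-inode samples and extrapolates 4h (4*60*60 seconds) ahead;
+ # the alert fires only when that projection drops below zero while fewer than
+ # 20% of inodes are currently free and the filesystem is not read-only.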
+ expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 20 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. + summary: Filesystem has less than 5% inodes left. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. + summary: Filesystem has less than 3% inodes left. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeNetworkReceiveErrs + annotations: + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' + summary: Network interface is reporting many receive errors. + expr: | + rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01 + for: 1h + labels: + severity: warning + - alert: NodeNetworkTransmitErrs + annotations: + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' + summary: Network interface is reporting many transmit errors. + expr: | + rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01 + for: 1h + labels: + severity: warning + - alert: NodeHighNumberConntrackEntriesUsed + annotations: + description: '{{ $value | humanizePercentage }} of conntrack entries are used.' + summary: Number of conntrack are getting close to the limit. + expr: | + (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 + labels: + severity: warning + - alert: NodeTextFileCollectorScrapeError + annotations: + description: Node Exporter text file collector failed to scrape. + summary: Node Exporter text file collector failed to scrape. + expr: | + node_textfile_scrape_error{job="node-exporter"} == 1 + labels: + severity: warning + - alert: NodeClockSkewDetected + annotations: + description: Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure NTP is configured correctly on this host. + summary: Clock skew detected. + expr: | + ( + node_timex_offset_seconds > 0.05 + and + deriv(node_timex_offset_seconds[5m]) >= 0 + ) + or + ( + node_timex_offset_seconds < -0.05 + and + deriv(node_timex_offset_seconds[5m]) <= 0 + ) + for: 10m + labels: + severity: warning + - alert: NodeClockNotSynchronising + annotations: + description: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host. + summary: Clock not synchronising. 
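+ # The expression below requires node_timex_sync_status to stay at 0 for the
+ # whole 5m window (min_over_time) and node_timex_maxerror_seconds to sit at
+ # its unsynchronised ceiling (the kernel reports roughly 16s once it has had
+ # no time source for a while), which keeps brief NTP hiccups from alerting.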
+ expr: | + min_over_time(node_timex_sync_status[5m]) == 0 + and + node_timex_maxerror_seconds >= 16 + for: 10m + labels: + severity: warning + - alert: NodeRAIDDegraded + annotations: + description: RAID array '{{ $labels.device }}' on {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically. + summary: RAID Array is degraded + expr: | + node_md_disks_required - ignoring (state) (node_md_disks{state="active"}) > 0 + for: 15m + labels: + severity: critical + - alert: NodeRAIDDiskFailure + annotations: + description: At least one device in RAID array on {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap. + summary: Failed device in RAID array + expr: | + node_md_disks{state="failed"} > 0 + labels: + severity: warning + - name: prometheus ## prometheus + rules: + - alert: PrometheusBadConfig + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration. + summary: Failed Prometheus configuration reload. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(prometheus_config_last_reload_successful{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) == 0 + for: 10m + labels: + severity: critical + - alert: PrometheusNotificationQueueRunningFull + annotations: + description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full. + summary: Prometheus alert notification queue predicted to run full in less than 30m. + expr: | + # Without min_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + predict_linear(prometheus_notifications_queue_length{job=~"prometheus-k8s|prometheus-user-workload"}[5m], 60 * 30) + > + min_over_time(prometheus_notifications_queue_capacity{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + for: 15m + labels: + severity: warning + - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers + annotations: + description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.' + summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager. + expr: | + ( + rate(prometheus_notifications_errors_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + / + rate(prometheus_notifications_sent_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + * 100 + > 1 + for: 15m + labels: + severity: warning + - alert: PrometheusNotConnectedToAlertmanagers + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers. + summary: Prometheus is not connected to any Alertmanagers. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(prometheus_notifications_alertmanagers_discovered{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) < 1 + for: 10m + labels: + severity: warning + - alert: PrometheusTSDBReloadsFailing + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h. 
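+ # PrometheusErrorSendingAlertsToSomeAlertmanagers above is the warning-level
+ # counterpart of PrometheusErrorSendingAlertsToAnyAlertmanager near the end
+ # of this group: the former fires at more than 1% send errors to an individual
+ # Alertmanager, while the latter goes critical only when even the healthiest
+ # Alertmanager (min without (alertmanager)) is above 3%.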
+ summary: Prometheus has issues reloading blocks from disk. + expr: | + increase(prometheus_tsdb_reloads_failures_total{job=~"prometheus-k8s|prometheus-user-workload"}[3h]) > 0 + for: 4h + labels: + severity: warning + - alert: PrometheusTSDBCompactionsFailing + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h. + summary: Prometheus has issues compacting blocks. + expr: | + increase(prometheus_tsdb_compactions_failed_total{job=~"prometheus-k8s|prometheus-user-workload"}[3h]) > 0 + for: 4h + labels: + severity: warning + - alert: PrometheusNotIngestingSamples + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples. + summary: Prometheus is not ingesting samples. + expr: | + ( + rate(prometheus_tsdb_head_samples_appended_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) <= 0 + and + ( + sum without(scrape_job) (prometheus_target_metadata_cache_entries{job=~"prometheus-k8s|prometheus-user-workload"}) > 0 + or + sum without(rule_group) (prometheus_rule_group_rules{job=~"prometheus-k8s|prometheus-user-workload"}) > 0 + ) + ) + for: 10m + labels: + severity: warning + - alert: PrometheusDuplicateTimestamps + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp. + summary: Prometheus is dropping samples with duplicate timestamps. + expr: | + rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 1h + labels: + severity: warning + - alert: PrometheusOutOfOrderTimestamps + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order. + summary: Prometheus drops samples with out-of-order timestamps. + expr: | + rate(prometheus_target_scrapes_sample_out_of_order_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 1h + labels: + severity: warning + - alert: PrometheusRemoteStorageFailures + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }} + summary: Prometheus fails to send samples to remote storage. + expr: | + ( + rate(prometheus_remote_storage_failed_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + / + ( + rate(prometheus_remote_storage_failed_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + + + rate(prometheus_remote_storage_succeeded_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + ) + * 100 + > 1 + for: 15m + labels: + severity: critical + - alert: PrometheusRemoteWriteBehind + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}. + summary: Prometheus remote write is behind. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ ( + max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + - ignoring(remote_name, url) group_right + max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + > 120 + for: 15m + labels: + severity: critical + - alert: PrometheusRemoteWriteDesiredShards + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job=~"prometheus-k8s|prometheus-user-workload"}` $labels.instance | query | first | value }}. + summary: Prometheus remote write desired shards calculation wants to run more than configured max shards. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + max_over_time(prometheus_remote_storage_shards_desired{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + > + max_over_time(prometheus_remote_storage_shards_max{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + for: 15m + labels: + severity: warning + - alert: PrometheusRuleFailures + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m. + summary: Prometheus is failing rule evaluations. + expr: | + increase(prometheus_rule_evaluation_failures_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 15m + labels: + severity: critical + - alert: PrometheusMissingRuleEvaluations + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m. + summary: Prometheus is missing rule evaluations due to slow rule group evaluation. + expr: | + increase(prometheus_rule_group_iterations_missed_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusTargetLimitHit + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because the number of targets exceeded the configured target_limit. + summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit. + expr: | + increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusErrorSendingAlertsToAnyAlertmanager + annotations: + description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.' + summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager. 
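+ # min without (alertmanager) in the expression below evaluates the error
+ # ratio per Alertmanager and requires the lowest of them to exceed 3%; in
+ # other words, alert delivery has to be failing toward every configured
+ # Alertmanager, not just one, before this critical alert fires.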
+ expr: | + min without (alertmanager) ( + rate(prometheus_notifications_errors_total{job=~"prometheus-k8s|prometheus-user-workload",alertmanager!~``}[5m]) + / + rate(prometheus_notifications_sent_total{job=~"prometheus-k8s|prometheus-user-workload",alertmanager!~``}[5m]) + ) + * 100 + > 3 + for: 15m + labels: + severity: critical diff --git a/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml b/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml new file mode 100644 index 000000000..a630fe5c6 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml @@ -0,0 +1,687 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: kubernetes-monitoring-rules + namespace: open-cluster-management-addon-observability +data: + kubernetes-monitoring-rules.yaml: | + groups: + - name: kube-apiserver-burnrate.rules + rules: + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1d])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1d])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[1d])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d])) + labels: + verb: read + record: apiserver_request:burnrate1d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1h])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[1h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h])) + labels: + verb: read + record: apiserver_request:burnrate1h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[2h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[2h])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[2h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h])) + labels: + verb: read + record: apiserver_request:burnrate2h + - expr: | + ( + ( + # too slow + sum by (cluster) 
(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[30m])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[30m])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[30m])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m])) + labels: + verb: read + record: apiserver_request:burnrate30m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[3d])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[3d])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[3d])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d])) + labels: + verb: read + record: apiserver_request:burnrate3d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[5m])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[5m])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[5m])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: apiserver_request:burnrate5m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[6h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[6h])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[6h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h])) + labels: + verb: read + record: apiserver_request:burnrate6h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + - + sum 
by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1d])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + labels: + verb: write + record: apiserver_request:burnrate1d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + labels: + verb: write + record: apiserver_request:burnrate1h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[2h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + labels: + verb: write + record: apiserver_request:burnrate2h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[30m])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + labels: + verb: write + record: apiserver_request:burnrate30m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[3d])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + labels: + verb: write + record: apiserver_request:burnrate3d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[5m])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: apiserver_request:burnrate5m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + - + sum by (cluster) 
(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[6h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + labels: + verb: write + record: apiserver_request:burnrate6h + - name: kube-apiserver-histogram.rules + rules: + - expr: | + histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0 + labels: + quantile: "0.99" + verb: read + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0 + labels: + quantile: "0.99" + verb: write + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - interval: 3m + name: kube-apiserver-availability.rules + rules: + - expr: | + avg_over_time(code_verb:apiserver_request_total:increase1h[30d]) * 24 * 30 + record: code_verb:apiserver_request_total:increase30d + - expr: | + sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"}) + labels: + verb: read + record: code:apiserver_request_total:increase30d + - expr: | + sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + labels: + verb: write + record: code:apiserver_request_total:increase30d + - expr: | + 1 - ( + ( + # write too slow + sum by (cluster) (increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) + - + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) + ) + + ( + # read too slow + sum by (cluster) (increase(apiserver_request_duration_seconds_count{verb=~"LIST|GET"}[30d])) + - + ( + ( + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope=~"resource|",le="1"}[30d])) + or + vector(0) + ) + + + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="namespace",le="5"}[30d])) + + + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="cluster",le="40"}[30d])) + ) + ) + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{code=~"5.."} or vector(0)) + ) + / + sum by (cluster) 
(code:apiserver_request_total:increase30d) + labels: + verb: all + record: apiserver_request:availability30d + - expr: | + 1 - ( + sum by (cluster) (increase(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30d])) + - + ( + # too slow + ( + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[30d])) + or + vector(0) + ) + + + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[30d])) + + + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[30d])) + ) + + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{verb="read",code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d{verb="read"}) + labels: + verb: read + record: apiserver_request:availability30d + - expr: | + 1 - ( + ( + # too slow + sum by (cluster) (increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) + - + sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) + ) + + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{verb="write",code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d{verb="write"}) + labels: + verb: write + record: apiserver_request:availability30d + - expr: | + sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: code_resource:apiserver_request_total:rate5m + - expr: | + sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: code_resource:apiserver_request_total:rate5m + - expr: | + sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: | + sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: | + sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: | + sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - name: k8s.rules + rules: + - expr: | + sum by (cluster, namespace, pod, container) ( + rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) + ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( + 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + - expr: | + sum by (cluster, namespace, pod, container) ( + irate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) + ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( + 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: 
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate + - expr: | + container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_working_set_bytes + - expr: | + container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_rss + - expr: | + container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_cache + - expr: | + container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_swap + - expr: | + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests + - expr: | + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_memory:kube_pod_container_resource_requests:sum + - expr: | + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests + - expr: | + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_cpu:kube_pod_container_resource_requests:sum + - expr: | + kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_memory:active:kube_pod_container_resource_limits + - expr: | + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_memory:kube_pod_container_resource_limits:sum + - expr: | + kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) + 
group_left() max by (namespace, pod) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits + - expr: | + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_cpu:kube_pod_container_resource_limits:sum + - expr: | + max by (cluster, namespace, workload, pod) ( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, + "replicaset", "$1", "owner_name", "(.*)" + ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) ( + 1, max by (replicaset, namespace, owner_name) ( + kube_replicaset_owner{job="kube-state-metrics"} + ) + ), + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: deployment + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: | + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: daemonset + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: | + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: statefulset + record: namespace_workload_pod:kube_pod_owner:relabel + - name: kube-scheduler.rules + rules: + - expr: | + histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: 
cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - name: node.rules + rules: + - expr: | + topk by(namespace, pod) (1, + max by (node, namespace, pod) ( + label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)") + )) + record: 'node_namespace_pod:kube_pod_info:' + - expr: | + count by (cluster, node) (sum by (node, cpu) ( + node_cpu_seconds_total{job="node-exporter"} + * on (namespace, pod) group_left(node) + topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:) + )) + record: node:node_num_cpu:sum + - expr: | + sum( + node_memory_MemAvailable_bytes{job="node-exporter"} or + ( + node_memory_Buffers_bytes{job="node-exporter"} + + node_memory_Cached_bytes{job="node-exporter"} + + node_memory_MemFree_bytes{job="node-exporter"} + + node_memory_Slab_bytes{job="node-exporter"} + ) + ) by (cluster) + record: :node_memory_MemAvailable_bytes:sum + - name: kubelet.rules + rules: + - expr: | + histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: "0.99" + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: "0.9" + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: "0.5" + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/kustomization.yaml b/operators/endpointmetrics/manifests/prometheus/kustomization.yaml new file mode 100644 index 000000000..605f8bb6d --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/kustomization.yaml @@ -0,0 +1,27 @@ +resources: +- kube-prometheus-rules.yaml +- kube-state-metrics-clusterRole.yaml +- kube-state-metrics-clusterRoleBinding.yaml +- kube-state-metrics-deployment.yaml +- kube-state-metrics-service.yaml +- kube-state-metrics-serviceAccount.yaml +- kubernetes-monitoring-rules.yaml +- kubernetes-monitoring-alertingrules.yaml +- node-exporter-clusterRole.yaml +- node-exporter-clusterRoleBinding.yaml +- node-exporter-daemonset.yaml +- node-exporter-rules.yaml +- node-exporter-service.yaml +- node-exporter-serviceAccount.yaml +- prometheus-clusterRole.yaml +- prometheus-clusterRoleBinding.yaml +- prometheus-config.yaml +- prometheus-role.yaml +- prometheus-role-default.yaml +- prometheus-role-kube-system.yaml +- prometheus-roleBinding.yaml +- prometheus-roleBinding-default.yaml +- 
prometheus-roleBinding-kube-system.yaml +- prometheus-service.yaml +- prometheus-serviceAccount.yaml +- prometheus-statefulset.yaml diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml new file mode 100644 index 000000000..ad783ae9b --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRole.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: node-exporter +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRoleBinding.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRoleBinding.yaml new file mode 100644 index 000000000..f5fe288ac --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-clusterRoleBinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-exporter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: node-exporter +subjects: +- kind: ServiceAccount + name: node-exporter + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml new file mode 100644 index 000000000..b530bba59 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-daemonset.yaml @@ -0,0 +1,92 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + name: node-exporter + namespace: open-cluster-management-addon-observability +spec: + selector: + matchLabels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + template: + metadata: + labels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + spec: + containers: + - args: + - --web.listen-address=127.0.0.1:9100 + - --path.sysfs=/host/sys + - --path.rootfs=/host/root + - --no-collector.wifi + - --no-collector.hwmon + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) + - --collector.netclass.ignored-devices=^(veth.*|[a-z0-9]+@if\d+)$ + - --collector.netdev.device-exclude=^(veth.*|[a-z0-9]+@if\d+)$ + image: quay.io/stolostron/node-exporter:2.4.0-SNAPSHOT-2021-08-11-14-15-20 + name: node-exporter + resources: + limits: + cpu: 250m + memory: 180Mi + requests: + cpu: 102m + memory: 180Mi + volumeMounts: + - mountPath: /host/sys + mountPropagation: HostToContainer + name: sys + readOnly: true + - mountPath: /host/root + mountPropagation: HostToContainer + name: root + readOnly: true + - args: + - --logtostderr + - --secure-listen-address=[$(IP)]:9100 + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --upstream=http://127.0.0.1:9100/ + env: + - name: IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: quay.io/stolostron/kube-rbac-proxy:2.4.0-SNAPSHOT-2021-08-11-14-15-20 + name: kube-rbac-proxy + ports: + - 
containerPort: 9100 + hostPort: 9100 + name: https + resources: + limits: + cpu: 20m + memory: 40Mi + requests: + cpu: 10m + memory: 20Mi + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + hostNetwork: true + hostPID: true + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 65534 + serviceAccountName: node-exporter + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /sys + name: sys + - hostPath: + path: / + name: root + diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml new file mode 100644 index 000000000..77352962e --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml @@ -0,0 +1,65 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: node-exporter-rules + namespace: open-cluster-management-addon-observability +data: + node-exporter-rules.yaml: | + groups: + - name: node-exporter.rules + rules: + - expr: | + count without (cpu) ( + count without (mode) ( + node_cpu_seconds_total{job="node-exporter"} + ) + ) + record: instance:node_num_cpu:sum + - expr: | + 1 - avg without (cpu, mode) ( + rate(node_cpu_seconds_total{job="node-exporter", mode="idle"}[1m]) + ) + record: instance:node_cpu_utilisation:rate1m + - expr: | + ( + node_load1{job="node-exporter"} + / + instance:node_num_cpu:sum{job="node-exporter"} + ) + record: instance:node_load1_per_cpu:ratio + - expr: | + 1 - ( + node_memory_MemAvailable_bytes{job="node-exporter"} + / + node_memory_MemTotal_bytes{job="node-exporter"} + ) + record: instance:node_memory_utilisation:ratio + - expr: | + rate(node_vmstat_pgmajfault{job="node-exporter"}[1m]) + record: instance:node_vmstat_pgmajfault:rate1m + - expr: | + rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) + record: instance_device:node_disk_io_time_seconds:rate1m + - expr: | + rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) + record: instance_device:node_disk_io_time_weighted_seconds:rate1m + - expr: | + sum without (device) ( + rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_receive_bytes_excluding_lo:rate1m + - expr: | + sum without (device) ( + rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_transmit_bytes_excluding_lo:rate1m + - expr: | + sum without (device) ( + rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_receive_drop_excluding_lo:rate1m + - expr: | + sum without (device) ( + rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_transmit_drop_excluding_lo:rate1m \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-service.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-service.yaml new file mode 100644 index 000000000..9777b18ad --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + name: node-exporter + namespace: open-cluster-management-addon-observability +spec: + 
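+ # clusterIP: None below makes this a headless Service: endpoints-based
+ # discovery resolves to the individual node-exporter pods (one per node,
+ # fronted by kube-rbac-proxy on the https port 9100) rather than to a single
+ # virtual IP.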
clusterIP: None + ports: + - name: https + port: 9100 + targetPort: https + selector: + app.kubernetes.io/component: exporter + app.kubernetes.io/name: node-exporter + diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-serviceAccount.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-serviceAccount.yaml new file mode 100644 index 000000000..7e53b7414 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/node-exporter-serviceAccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-exporter + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-clusterRole.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-clusterRole.yaml new file mode 100644 index 000000000..dadba7540 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-clusterRole.yaml @@ -0,0 +1,36 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus-k8s +rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- nonResourceURLs: + - /metrics + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + - nodes/proxy + verbs: + - list + - watch + - get +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-clusterRoleBinding.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-clusterRoleBinding.yaml new file mode 100644 index 000000000..13dc7316c --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-clusterRoleBinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-k8s +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-k8s +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml new file mode 100644 index 000000000..522698870 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml @@ -0,0 +1,780 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: prometheus-k8s-config + namespace: open-cluster-management-addon-observability +data: + prometheus.yaml: | + global: + evaluation_interval: 30s + scrape_interval: 5m + external_labels: + cluster: _CLUSTERID_ + alerting: + alert_relabel_configs: + - separator: ; + regex: prometheus_replica + replacement: $1 + action: labeldrop + alertmanagers: + - authorization: + type: Bearer + credentials_file: /etc/prometheus/secrets/observability-alertmanager-accessor/token + tls_config: + ca_file: /etc/prometheus/secrets/hub-alertmanager-router-ca/service-ca.crt + server_name: "" + insecure_skip_verify: false + follow_redirects: true + scheme: https + path_prefix: / + timeout: 10s + api_version: v2 + static_configs: + - targets: + - _ALERTMANAGER_ENDPOINT_ + rule_files: + - /etc/prometheus/rules/prometheus-k8s-rulefiles-0/*.yaml + scrape_configs: + - job_name: serviceMonitor/open-cluster-management-addon-observability/coredns/0 + honor_labels: false + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - kube-system + scrape_interval: 15s + 
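+ # _CLUSTERID_ and _ALERTMANAGER_ENDPOINT_ in the global and alerting sections
+ # above look like template placeholders, presumably substituted by the
+ # endpoint-metrics operator when it renders this ConfigMap, so that metrics
+ # and alerts carry the managed cluster's identity and reach the hub
+ # Alertmanager.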
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_k8s_app + regex: kube-dns + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: metrics + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: metrics + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + - job_name: serviceMonitor/open-cluster-management-addon-observability/kube-apiserver/0 + honor_labels: false + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - default + scrape_interval: 30s + scheme: https + tls_config: + insecure_skip_verify: false + server_name: kubernetes + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_component + regex: apiserver + - action: keep + source_labels: + - __meta_kubernetes_service_label_provider + regex: kubernetes + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: https + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_component + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: https + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + metric_relabel_configs: + - source_labels: + - __name__ + regex: 
kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) + action: drop + - source_labels: + - __name__ + regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) + action: drop + - source_labels: + - __name__ + regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) + action: drop + - source_labels: + - __name__ + regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) + action: drop + - source_labels: + - __name__ + regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) + action: drop + - source_labels: + - __name__ + regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) + action: drop + - source_labels: + - __name__ + regex: transformation_(transformation_latencies_microseconds|failures_total) + action: drop + - source_labels: + - __name__ + regex: 
(admission_quota_controller_adds|admission_quota_controller_depth|admission_quota_controller_longest_running_processor_microseconds|admission_quota_controller_queue_latency|admission_quota_controller_unfinished_work_seconds|admission_quota_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|APIServiceOpenAPIAggregationControllerQueue1_depth|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_retries|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_adds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|APIServiceRegistrationController_queue_latency|APIServiceRegistrationController_retries|APIServiceRegistrationController_unfinished_work_seconds|APIServiceRegistrationController_work_duration|autoregister_adds|autoregister_depth|autoregister_longest_running_processor_microseconds|autoregister_queue_latency|autoregister_retries|autoregister_unfinished_work_seconds|autoregister_work_duration|AvailableConditionController_adds|AvailableConditionController_depth|AvailableConditionController_longest_running_processor_microseconds|AvailableConditionController_queue_latency|AvailableConditionController_retries|AvailableConditionController_unfinished_work_seconds|AvailableConditionController_work_duration|crd_autoregistration_controller_adds|crd_autoregistration_controller_depth|crd_autoregistration_controller_longest_running_processor_microseconds|crd_autoregistration_controller_queue_latency|crd_autoregistration_controller_retries|crd_autoregistration_controller_unfinished_work_seconds|crd_autoregistration_controller_work_duration|crdEstablishing_adds|crdEstablishing_depth|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_queue_latency|crdEstablishing_retries|crdEstablishing_unfinished_work_seconds|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_finalizer_longest_running_processor_microseconds|crd_finalizer_queue_latency|crd_finalizer_retries|crd_finalizer_unfinished_work_seconds|crd_finalizer_work_duration|crd_naming_condition_controller_adds|crd_naming_condition_controller_depth|crd_naming_condition_controller_longest_running_processor_microseconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|crd_naming_condition_controller_unfinished_work_seconds|crd_naming_condition_controller_work_duration|crd_openapi_controller_adds|crd_openapi_controller_depth|crd_openapi_controller_longest_running_processor_microseconds|crd_openapi_controller_queue_latency|crd_openapi_controller_retries|crd_openapi_controller_unfinished_work_seconds|crd_openapi_controller_work_duration|DiscoveryController_adds|DiscoveryController_depth|DiscoveryController_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_retries|DiscoveryController_unfinished_work_seconds|DiscoveryController_work_duration|kubeproxy_sync_proxy_rules_latency_microseconds|non_structural_schema_condition_controller_adds|non_structural_schema_condition_controller_depth|non_structural_schema_condition_controller_longest_running_processor_microseconds|non_structural_schema_condition_controller_queue_latency|non_structural_schema_condition_controller_retries|non_structural_schema_condition_controller_unfinished_wo
rk_seconds|non_structural_schema_condition_controller_work_duration|rest_client_request_latency_seconds|storage_operation_errors_total|storage_operation_status_count) + action: drop + - source_labels: + - __name__ + regex: etcd_(debugging|disk|server).* + action: drop + - source_labels: + - __name__ + regex: apiserver_admission_controller_admission_latencies_seconds_.* + action: drop + - source_labels: + - __name__ + regex: apiserver_admission_step_admission_latencies_seconds_.* + action: drop + - source_labels: + - __name__ + - le + regex: apiserver_request_duration_seconds_bucket;(0.15|0.25|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2.5|3|3.5|4.5|6|7|8|9|15|25|30|50) + action: drop + - source_labels: + - __name__ + regex: (_DISABLED_METRICS_) + action: drop + - job_name: serviceMonitor/open-cluster-management-addon-observability/kube-controller-manager/0 + honor_labels: false + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - kube-system + scrape_interval: 30s + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + regex: kube-controller-manager + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: https-metrics + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: https-metrics + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + metric_relabel_configs: + - source_labels: + - __name__ + regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) + action: drop + - source_labels: + - __name__ + regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) + action: drop + - source_labels: + - __name__ + regex: 
apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) + action: drop + - source_labels: + - __name__ + regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) + action: drop + - source_labels: + - __name__ + regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) + action: drop + - source_labels: + - __name__ + regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) + action: drop + - source_labels: + - __name__ + regex: transformation_(transformation_latencies_microseconds|failures_total) + action: drop + - source_labels: + - __name__ + regex: (admission_quota_controller_adds|admission_quota_controller_depth|admission_quota_controller_longest_running_processor_microseconds|admission_quota_controller_queue_latency|admission_quota_controller_unfinished_work_seconds|admission_quota_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|APIServiceOpenAPIAggregationControllerQueue1_depth|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_retries|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_adds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|APIServiceRegistrationController_queue_latency|APIServiceRegistrationController_retries|APIServiceRegistrationController_unfinished_work_seconds|APIServiceRegistrationController_work_duration|autoregister_adds|autoregister_depth|autoregister_longest_running_processor_microseconds|autoregister_queue_latency|autoregister_retries|autoregister_unfinished_work_seconds|autoregister_work_duration|AvailableConditionController_adds|AvailableConditionController_depth|AvailableConditionController_longest_running_processor_microseconds|AvailableConditionController_queue_latency|AvailableConditionController_retries|AvailableConditionController_unfinished_work_seconds|AvailableConditionController_work_duration|crd_autoregistration_controller_adds|crd_autoregistration_controller_depth|crd_autoregistration_controller_longest_running_processor_microseconds|crd_autoregistration_controller_queue_latency|crd_autoregistration_controller_retries|crd_autoregistration_controller_unfinished_work_seconds|crd_autoregistration_controller_work_duration|crdEstablishing_adds|crdEstablishing_depth|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_queue_latency|crdEstablishing_retries|crdEstablishing_unfinished_work_seconds|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_finalizer_longest_running_processor_microseconds|crd_finalizer_queue_latency|crd_finalizer_retries|crd_finalizer_unfinished_work_seconds|crd_finalizer_work_duration|crd_naming_condition_controller_adds|crd_naming_condition_controller_depth|crd_naming_condition_controller_longest_running_processor_microseconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|crd
_naming_condition_controller_unfinished_work_seconds|crd_naming_condition_controller_work_duration|crd_openapi_controller_adds|crd_openapi_controller_depth|crd_openapi_controller_longest_running_processor_microseconds|crd_openapi_controller_queue_latency|crd_openapi_controller_retries|crd_openapi_controller_unfinished_work_seconds|crd_openapi_controller_work_duration|DiscoveryController_adds|DiscoveryController_depth|DiscoveryController_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_retries|DiscoveryController_unfinished_work_seconds|DiscoveryController_work_duration|kubeproxy_sync_proxy_rules_latency_microseconds|non_structural_schema_condition_controller_adds|non_structural_schema_condition_controller_depth|non_structural_schema_condition_controller_longest_running_processor_microseconds|non_structural_schema_condition_controller_queue_latency|non_structural_schema_condition_controller_retries|non_structural_schema_condition_controller_unfinished_work_seconds|non_structural_schema_condition_controller_work_duration|rest_client_request_latency_seconds|storage_operation_errors_total|storage_operation_status_count) + action: drop + - source_labels: + - __name__ + regex: etcd_(debugging|disk|request|server).* + action: drop + - job_name: serviceMonitor/open-cluster-management-addon-observability/kube-scheduler/0 + honor_labels: false + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - kube-system + scrape_interval: 30s + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + regex: kube-scheduler + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: https-metrics + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: https-metrics + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + - job_name: serviceMonitor/open-cluster-management-addon-observability/kube-state-metrics/0 + honor_labels: true + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - open-cluster-management-addon-observability + scrape_interval: 30s + scrape_timeout: 30s + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: 
__tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_component + regex: exporter + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + regex: kube-state-metrics + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: https-main + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: https-main + - regex: (pod|service|endpoint|namespace) + action: labeldrop + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + - job_name: serviceMonitor/open-cluster-management-addon-observability/kube-state-metrics/1 + honor_labels: false + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - open-cluster-management-addon-observability + scrape_interval: 30s + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_component + regex: exporter + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + regex: kube-state-metrics + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: https-self + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: https-self + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + - job_name: 
serviceMonitor/open-cluster-management-addon-observability/node-exporter/0 + honor_labels: false + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - open-cluster-management-addon-observability + scrape_interval: 15s + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + relabel_configs: + - source_labels: + - job + target_label: __tmp_prometheus_job_name + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_component + regex: exporter + - action: keep + source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + regex: node-exporter + - action: keep + source_labels: + - __meta_kubernetes_endpoint_port_name + regex: https + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Node;(.*) + replacement: ${1} + target_label: node + - source_labels: + - __meta_kubernetes_endpoint_address_target_kind + - __meta_kubernetes_endpoint_address_target_name + separator: ; + regex: Pod;(.*) + replacement: ${1} + target_label: pod + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_pod_container_name + target_label: container + - source_labels: + - __meta_kubernetes_service_name + target_label: job + replacement: ${1} + - source_labels: + - __meta_kubernetes_service_label_app_kubernetes_io_name + target_label: job + regex: (.+) + replacement: ${1} + - target_label: endpoint + replacement: https + - source_labels: + - __meta_kubernetes_pod_node_name + target_label: instance + regex: (.*) + replacement: $1 + action: replace + - source_labels: + - __address__ + target_label: __tmp_hash + modulus: 1 + action: hashmod + - source_labels: + - __tmp_hash + regex: 0 + action: keep + - job_name: kubelet-metrics-cadvisor + honor_timestamps: false + scrape_interval: 30s + metrics_path: /metrics/cadvisor + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc.cluster.local:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: node + replacement: ${1} + - target_label: metrics_path + replacement: /metrics/cadvisor + - target_label: job + replacement: kubelet + metric_relabel_configs: + - source_labels: + - __name__ + regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + action: drop + - source_labels: + - __name__ + - pod + - namespace + regex: (container_fs_.*|container_spec_.*|container_blkio_device_usage_total|container_file_descriptors|container_sockets|container_threads_max|container_threads|container_start_time_seconds|container_last_seen);; + action: drop + - source_labels: + - __name__ + regex: (_DISABLED_METRICS_) + action: drop + - job_name: kubelet-metrics-probes + honor_timestamps: false + scrape_interval: 30s + metrics_path: /metrics/cadvisor + scheme: https + tls_config: + 
insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc.cluster.local:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/probes + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: node + replacement: ${1} + - target_label: metrics_path + replacement: /metrics/cadvisor + - target_label: job + replacement: kubelet + metric_relabel_configs: + - source_labels: + - __name__ + regex: (_DISABLED_METRICS_) + action: drop + - job_name: kubelet-metrics + honor_timestamps: false + scrape_interval: 30s + metrics_path: /metrics/cadvisor + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc.cluster.local:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: node + replacement: ${1} + - target_label: metrics_path + replacement: /metrics/cadvisor + - target_label: job + replacement: kubelet + metric_relabel_configs: + - source_labels: + - __name__ + regex: kubelet_(pod_worker_latency_microseconds|pod_start_latency_microseconds|cgroup_manager_latency_microseconds|pod_worker_start_latency_microseconds|pleg_relist_latency_microseconds|pleg_relist_interval_microseconds|runtime_operations|runtime_operations_latency_microseconds|runtime_operations_errors|eviction_stats_age_microseconds|device_plugin_registration_count|device_plugin_alloc_latency_microseconds|network_plugin_operations_latency_microseconds) + action: drop + - source_labels: + - __name__ + regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) + action: drop + - source_labels: + - __name__ + regex: apiserver_(request_count|request_latencies|request_latencies_summary|dropped_requests|storage_data_key_generation_latencies_microseconds|storage_transformation_failures_total|storage_transformation_latencies_microseconds|proxy_tunnel_sync_latency_secs) + action: drop + - source_labels: + - __name__ + regex: kubelet_docker_(operations|operations_latency_microseconds|operations_errors|operations_timeout) + action: drop + - source_labels: + - __name__ + regex: reflector_(items_per_list|items_per_watch|list_duration_seconds|lists_total|short_watches_total|watch_duration_seconds|watches_total) + action: drop + - source_labels: + - __name__ + regex: etcd_(helper_cache_hit_count|helper_cache_miss_count|helper_cache_entry_count|request_cache_get_latencies_summary|request_cache_add_latencies_summary|request_latencies_summary) + action: drop + - source_labels: + - __name__ + regex: transformation_(transformation_latencies_microseconds|failures_total) + action: drop + - source_labels: + - __name__ + regex: 
(admission_quota_controller_adds|admission_quota_controller_depth|admission_quota_controller_longest_running_processor_microseconds|admission_quota_controller_queue_latency|admission_quota_controller_unfinished_work_seconds|admission_quota_controller_work_duration|APIServiceOpenAPIAggregationControllerQueue1_adds|APIServiceOpenAPIAggregationControllerQueue1_depth|APIServiceOpenAPIAggregationControllerQueue1_longest_running_processor_microseconds|APIServiceOpenAPIAggregationControllerQueue1_queue_latency|APIServiceOpenAPIAggregationControllerQueue1_retries|APIServiceOpenAPIAggregationControllerQueue1_unfinished_work_seconds|APIServiceOpenAPIAggregationControllerQueue1_work_duration|APIServiceRegistrationController_adds|APIServiceRegistrationController_depth|APIServiceRegistrationController_longest_running_processor_microseconds|APIServiceRegistrationController_queue_latency|APIServiceRegistrationController_retries|APIServiceRegistrationController_unfinished_work_seconds|APIServiceRegistrationController_work_duration|autoregister_adds|autoregister_depth|autoregister_longest_running_processor_microseconds|autoregister_queue_latency|autoregister_retries|autoregister_unfinished_work_seconds|autoregister_work_duration|AvailableConditionController_adds|AvailableConditionController_depth|AvailableConditionController_longest_running_processor_microseconds|AvailableConditionController_queue_latency|AvailableConditionController_retries|AvailableConditionController_unfinished_work_seconds|AvailableConditionController_work_duration|crd_autoregistration_controller_adds|crd_autoregistration_controller_depth|crd_autoregistration_controller_longest_running_processor_microseconds|crd_autoregistration_controller_queue_latency|crd_autoregistration_controller_retries|crd_autoregistration_controller_unfinished_work_seconds|crd_autoregistration_controller_work_duration|crdEstablishing_adds|crdEstablishing_depth|crdEstablishing_longest_running_processor_microseconds|crdEstablishing_queue_latency|crdEstablishing_retries|crdEstablishing_unfinished_work_seconds|crdEstablishing_work_duration|crd_finalizer_adds|crd_finalizer_depth|crd_finalizer_longest_running_processor_microseconds|crd_finalizer_queue_latency|crd_finalizer_retries|crd_finalizer_unfinished_work_seconds|crd_finalizer_work_duration|crd_naming_condition_controller_adds|crd_naming_condition_controller_depth|crd_naming_condition_controller_longest_running_processor_microseconds|crd_naming_condition_controller_queue_latency|crd_naming_condition_controller_retries|crd_naming_condition_controller_unfinished_work_seconds|crd_naming_condition_controller_work_duration|crd_openapi_controller_adds|crd_openapi_controller_depth|crd_openapi_controller_longest_running_processor_microseconds|crd_openapi_controller_queue_latency|crd_openapi_controller_retries|crd_openapi_controller_unfinished_work_seconds|crd_openapi_controller_work_duration|DiscoveryController_adds|DiscoveryController_depth|DiscoveryController_longest_running_processor_microseconds|DiscoveryController_queue_latency|DiscoveryController_retries|DiscoveryController_unfinished_work_seconds|DiscoveryController_work_duration|kubeproxy_sync_proxy_rules_latency_microseconds|non_structural_schema_condition_controller_adds|non_structural_schema_condition_controller_depth|non_structural_schema_condition_controller_longest_running_processor_microseconds|non_structural_schema_condition_controller_queue_latency|non_structural_schema_condition_controller_retries|non_structural_schema_condition_controller_unfinished_wo
rk_seconds|non_structural_schema_condition_controller_work_duration|rest_client_request_latency_seconds|storage_operation_errors_total|storage_operation_status_count) + action: drop + - source_labels: + - __name__ + regex: (_DISABLED_METRICS_) + action: drop \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-role-default.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-role-default.yaml new file mode 100644 index 000000000..c6137d397 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-role-default.yaml @@ -0,0 +1,34 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + update-namespace: "false" + name: prometheus-k8s + namespace: default +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-role-kube-system.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-role-kube-system.yaml new file mode 100644 index 000000000..07c59bddb --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-role-kube-system.yaml @@ -0,0 +1,34 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + update-namespace: "false" + name: prometheus-k8s + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-role.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-role.yaml new file mode 100644 index 000000000..b5fcaff51 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-role.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: prometheus-k8s + namespace: open-cluster-management-addon-observability +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-default.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-default.yaml new file mode 100644 index 000000000..c7e5d2bbd --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-default.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + update-namespace: "false" + name: prometheus-k8s + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: open-cluster-management-addon-observability + + diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-kube-system.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-kube-system.yaml 
new file mode 100644 index 000000000..527c31999 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding-kube-system.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + update-namespace: "false" + name: prometheus-k8s + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: open-cluster-management-addon-observability + + diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding.yaml new file mode 100644 index 000000000..4ffcb78e4 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-roleBinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-k8s + namespace: open-cluster-management-addon-observability +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: open-cluster-management-addon-observability + diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-service.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-service.yaml new file mode 100644 index 000000000..533e3a1d8 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-service.yaml @@ -0,0 +1,18 @@ + +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + name: prometheus-k8s + namespace: open-cluster-management-addon-observability +spec: + ports: + - name: https + port: 9091 + targetPort: https + selector: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + sessionAffinity: ClientIP \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-serviceAccount.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-serviceAccount.yaml new file mode 100644 index 000000000..d7d88ea23 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-serviceAccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus-k8s + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml new file mode 100644 index 000000000..b8a2501f7 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml @@ -0,0 +1,165 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + name: prometheus-k8s + namespace: open-cluster-management-addon-observability +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + serviceName: prometheus-operated + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: prometheus + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + spec: + containers: + - args: + - --config.file=/etc/prometheus/config_out/prometheus.yaml + - --storage.tsdb.path=/prometheus + - --storage.tsdb.retention.time=24h + - --web.enable-lifecycle + - --web.route-prefix=/ + image: 
quay.io/stolostron/prometheus:2.4.0-SNAPSHOT-2021-08-11-14-15-20
+        imagePullPolicy: IfNotPresent
+        name: prometheus
+        ports:
+        - containerPort: 9090
+          name: web
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 120
+          httpGet:
+            path: /-/ready
+            port: web
+            scheme: HTTP
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 3
+        resources:
+          requests:
+            memory: 400Mi
+        volumeMounts:
+        - mountPath: /prometheus
+          name: prometheus-k8s-db
+        - mountPath: /etc/prometheus/config_out
+          name: config-out
+          readOnly: true
+        - name: node-exporter-rules
+          mountPath: /etc/prometheus/rules/prometheus-k8s-rulefiles-0/node-exporter-rules.yaml
+          subPath: node-exporter-rules.yaml
+        - name: kubernetes-monitoring-rules
+          mountPath: /etc/prometheus/rules/prometheus-k8s-rulefiles-0/kubernetes-monitoring-rules.yaml
+          subPath: kubernetes-monitoring-rules.yaml
+        - name: kube-prometheus-rules
+          mountPath: /etc/prometheus/rules/prometheus-k8s-rulefiles-0/kube-prometheus-rules.yaml
+          subPath: kube-prometheus-rules.yaml
+        - name: kubernetes-monitoring-alertingrules
+          mountPath: /etc/prometheus/rules/prometheus-k8s-rulefiles-0/kubernetes-monitoring-alertingrules.yaml
+          subPath: kubernetes-monitoring-alertingrules.yaml
+        - name: observability-alertmanager-accessor
+          mountPath: /etc/prometheus/secrets/observability-alertmanager-accessor
+          readOnly: true
+        - name: hub-alertmanager-router-ca
+          mountPath: /etc/prometheus/secrets/hub-alertmanager-router-ca
+          readOnly: true
+      - args:
+        - --logtostderr
+        - --secure-listen-address=[$(IP)]:9091
+        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
+        - --upstream=http://127.0.0.1:9090/
+        env:
+        - name: IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.podIP
+        image: quay.io/stolostron/kube-rbac-proxy:2.4.0-SNAPSHOT-2021-08-11-14-15-20
+        name: kube-rbac-proxy
+        ports:
+        - containerPort: 9091
+          hostPort: 9091
+          name: https
+        resources:
+          limits:
+            cpu: 20m
+            memory: 40Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+        securityContext:
+          runAsGroup: 65532
+          runAsNonRoot: true
+          runAsUser: 65532
+      - args:
+        - -webhook-url=http://localhost:9090/-/reload
+        - -volume-dir=/etc/prometheus/secrets/hub-alertmanager-router-ca
+        - -volume-dir=/etc/prometheus/secrets/observability-alertmanager-accessor
+        - -volume-dir=/etc/prometheus/config_out
+        image: quay.io/openshift/origin-configmap-reloader:4.5.0
+        imagePullPolicy: IfNotPresent
+        name: config-reloader
+        resources:
+          requests:
+            cpu: 4m
+            memory: 25Mi
+        volumeMounts:
+        - mountPath: /etc/prometheus/config_out
+          name: config-out
+          readOnly: true
+        - name: observability-alertmanager-accessor
+          mountPath: /etc/prometheus/secrets/observability-alertmanager-accessor
+          readOnly: true
+        - name: hub-alertmanager-router-ca
+          mountPath: /etc/prometheus/secrets/hub-alertmanager-router-ca
+          readOnly: true
+      dnsPolicy: ClusterFirst
+      nodeSelector:
+        kubernetes.io/os: linux
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext:
+        fsGroup: 2000
+        runAsNonRoot: true
+        runAsUser: 1000
+      serviceAccount: prometheus-k8s
+      serviceAccountName: prometheus-k8s
+      terminationGracePeriodSeconds: 600
+      volumes:
+      - emptyDir: {}
+        name: prometheus-k8s-db
+      - configMap:
+          defaultMode: 420
+          name: prometheus-k8s-config
+        name: config-out
+      - configMap:
+          defaultMode: 420
+          name: node-exporter-rules
+        name: node-exporter-rules
+      - configMap:
+          defaultMode:
420 + name: kubernetes-monitoring-rules + name: kubernetes-monitoring-rules + - configMap: + defaultMode: 420 + name: kube-prometheus-rules + name: kube-prometheus-rules + - configMap: + defaultMode: 420 + name: kubernetes-monitoring-alertingrules + name: kubernetes-monitoring-alertingrules + - secret: + defaultMode: 420 + secretName: observability-alertmanager-accessor + name: observability-alertmanager-accessor + - secret: + defaultMode: 420 + secretName: hub-alertmanager-router-ca + name: hub-alertmanager-router-ca diff --git a/operators/endpointmetrics/pkg/rendering/renderer.go b/operators/endpointmetrics/pkg/rendering/renderer.go new file mode 100644 index 000000000..96ec346eb --- /dev/null +++ b/operators/endpointmetrics/pkg/rendering/renderer.go @@ -0,0 +1,175 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "context" + "fmt" + "os" + "strings" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/stolostron/multicluster-observability-operator/operators/endpointmetrics/pkg/rendering/templates" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" + templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +const ( + metricsConfigMapKey = "metrics_list.yaml" +) + +var ( + namespace = os.Getenv("WATCH_NAMESPACE") + log = logf.Log.WithName("renderer") + disabledMetrics = []string{ + "apiserver_admission_controller_admission_duration_seconds_bucket", + "apiserver_flowcontrol_priority_level_request_count_watermarks_bucket", + "apiserver_response_sizes_bucket", + "apiserver_watch_events_sizes_bucket", + "container_memory_failures_total", + "cluster_quantile:apiserver_request_duration_seconds:histogram_quantile", + "etcd_request_duration_seconds_bucket", + "kubelet_http_requests_duration_seconds_bucket", + "kubelet_runtime_operations_duration_seconds_bucket", + "rest_client_request_duration_seconds_bucket", + "storage_operation_duration_seconds_bucket", + } +) + +var Images = map[string]string{} + +func Render(r *rendererutil.Renderer, c runtimeclient.Client, hubInfo *operatorconfig.HubInfo) ([]*unstructured.Unstructured, error) { + + genericTemplates, err := templates.GetTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + return nil, err + } + resources, err := r.RenderTemplates(genericTemplates, namespace, map[string]string{}) + if err != nil { + return nil, err + } + for idx := range resources { + if resources[idx].GetKind() == "Deployment" && resources[idx].GetName() == "kube-state-metrics" { + obj := util.GetK8sObj(resources[idx].GetKind()) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj) + if err != nil { + return nil, err + } + dep := obj.(*v1.Deployment) + spec := &dep.Spec.Template.Spec + spec.Containers[0].Image = Images[operatorconfig.KubeStateMetricsKey] + spec.Containers[1].Image = Images[operatorconfig.KubeRbacProxyKey] + spec.Containers[2].Image = Images[operatorconfig.KubeRbacProxyKey] + 
spec.ImagePullSecrets = []corev1.LocalObjectReference{
+				{Name: os.Getenv(operatorconfig.PullSecret)},
+			}
+
+			unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+			if err != nil {
+				return nil, err
+			}
+			resources[idx].Object = unstructuredObj
+		}
+		if resources[idx].GetKind() == "StatefulSet" && resources[idx].GetName() == "prometheus-k8s" {
+			obj := util.GetK8sObj(resources[idx].GetKind())
+			err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj)
+			if err != nil {
+				return nil, err
+			}
+			sts := obj.(*v1.StatefulSet)
+			spec := &sts.Spec.Template.Spec
+			spec.Containers[0].Image = Images[operatorconfig.PrometheusKey]
+			spec.Containers[1].Image = Images[operatorconfig.KubeRbacProxyKey]
+			spec.Containers[2].Image = Images[operatorconfig.ConfigmapReloaderKey]
+			spec.ImagePullSecrets = []corev1.LocalObjectReference{
+				{Name: os.Getenv(operatorconfig.PullSecret)},
+			}
+
+			unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+			if err != nil {
+				return nil, err
+			}
+			resources[idx].Object = unstructuredObj
+		}
+		if resources[idx].GetKind() == "DaemonSet" && resources[idx].GetName() == "node-exporter" {
+			obj := util.GetK8sObj(resources[idx].GetKind())
+			err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj)
+			if err != nil {
+				return nil, err
+			}
+			ds := obj.(*v1.DaemonSet)
+			spec := &ds.Spec.Template.Spec
+			spec.Containers[0].Image = Images[operatorconfig.NodeExporterKey]
+			spec.Containers[1].Image = Images[operatorconfig.KubeRbacProxyKey]
+			spec.ImagePullSecrets = []corev1.LocalObjectReference{
+				{Name: os.Getenv(operatorconfig.PullSecret)},
+			}
+
+			unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+			if err != nil {
+				return nil, err
+			}
+			resources[idx].Object = unstructuredObj
+		}
+		if resources[idx].GetKind() == "ConfigMap" && resources[idx].GetName() == "prometheus-k8s-config" {
+			obj := util.GetK8sObj(resources[idx].GetKind())
+			err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj)
+			if err != nil {
+				return nil, err
+			}
+			cm := obj.(*corev1.ConfigMap)
+			promConfig, exists := cm.Data["prometheus.yaml"]
+			if !exists {
+				return nil, fmt.Errorf("no key 'prometheus.yaml' found in the configmap: %s/%s", cm.GetNamespace(), cm.GetName())
+			}
+			// replace the hub alertmanager address
+			hubAmEp := strings.TrimPrefix(hubInfo.AlertmanagerEndpoint, "https://")
+			promConfig = strings.ReplaceAll(promConfig, "_ALERTMANAGER_ENDPOINT_", hubAmEp)
+			// replace the cluster ID with clusterName in hubInfo
+			promConfig = strings.ReplaceAll(promConfig, "_CLUSTERID_", hubInfo.ClusterName)
+
+			// replace the disabled metrics
+			disabledMetricsSt, err := getDisabledMetrics(c)
+			if err != nil {
+				return nil, err
+			}
+			if disabledMetricsSt != "" {
+				promConfig = strings.ReplaceAll(promConfig, "_DISABLED_METRICS_", disabledMetricsSt)
+			}
+			// always write the substituted config back so the alertmanager endpoint and
+			// cluster ID replacements are kept even when no metrics are disabled
+			cm.Data["prometheus.yaml"] = promConfig
+
+			unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
+			if err != nil {
+				return nil, err
+			}
+			resources[idx].Object = unstructuredObj
+		}
+	}
+
+	return resources, nil
+}
+
+func getDisabledMetrics(c runtimeclient.Client) (string, error) {
+	cm := &corev1.ConfigMap{}
+	err := c.Get(context.TODO(), types.NamespacedName{Name: operatorconfig.AllowlistConfigMapName,
+		Namespace: namespace}, cm)
+	if err != nil {
+		return "", err
+	}
+	metricsList := []string{}
+	for _, m := range disabledMetrics {
+		// only drop metrics that are not listed in the metrics allowlist configmap
+		if !strings.Contains(cm.Data[metricsConfigMapKey], m) {
metricsList = append(metricsList, m) + } + } + return strings.Join(metricsList, "|"), nil +} diff --git a/operators/endpointmetrics/pkg/rendering/renderer_test.go b/operators/endpointmetrics/pkg/rendering/renderer_test.go new file mode 100644 index 000000000..c895f2e44 --- /dev/null +++ b/operators/endpointmetrics/pkg/rendering/renderer_test.go @@ -0,0 +1,67 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "os" + "path" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" + templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" +) + +func getAllowlistCM() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorconfig.AllowlistConfigMapName, + Namespace: namespace, + }, + Data: map[string]string{ + metricsConfigMapKey: ` +names: + - apiserver_watch_events_sizes_bucket +`}, + } +} + +func TestRender(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get working dir %v", err) + } + templatesPath := path.Join(path.Dir(path.Dir(wd)), "manifests") + os.Setenv(templatesutil.TemplatesPathEnvVar, templatesPath) + defer os.Unsetenv(templatesutil.TemplatesPathEnvVar) + + renderer := rendererutil.NewRenderer() + hubInfo := &operatorconfig.HubInfo{ + ClusterName: "foo", + ObservatoriumAPIEndpoint: "testing.com", + AlertmanagerEndpoint: "testing.com", + AlertmanagerRouterCA: "testing", + } + + c := fake.NewFakeClient([]runtime.Object{getAllowlistCM()}...) + + objs, err := Render(renderer, c, hubInfo) + if err != nil { + t.Fatalf("failed to render endpoint templates: %v", err) + } + + printObjs(t, objs) +} + +func printObjs(t *testing.T, objs []*unstructured.Unstructured) { + for _, obj := range objs { + t.Log(obj) + } +} diff --git a/operators/endpointmetrics/pkg/rendering/templates/templates.go b/operators/endpointmetrics/pkg/rendering/templates/templates.go new file mode 100644 index 000000000..6347431b4 --- /dev/null +++ b/operators/endpointmetrics/pkg/rendering/templates/templates.go @@ -0,0 +1,24 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package templates + +import ( + "sigs.k8s.io/kustomize/v3/pkg/resource" + + "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" +) + +// GetTemplates reads base manifest +func GetTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + + // resourceList contains all kustomize resources + resourceList := []*resource.Resource{} + + // add prometheus template + if err := r.AddTemplateFromPath(r.GetTemplatesPath()+"/prometheus", &resourceList); err != nil { + return resourceList, err + } + + return resourceList, nil +} diff --git a/operators/endpointmetrics/pkg/rendering/templates/templates_test.go b/operators/endpointmetrics/pkg/rendering/templates/templates_test.go new file mode 100644 index 000000000..f71e6b0dc --- /dev/null +++ b/operators/endpointmetrics/pkg/rendering/templates/templates_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project
+
+package templates
+
+import (
+	"os"
+	"path"
+	"testing"
+
+	templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates"
+)
+
+func TestGetCoreTemplates(t *testing.T) {
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("failed to get working dir %v", err)
+	}
+	templatesPath := path.Join(path.Dir(path.Dir(path.Dir(wd))), "manifests")
+	os.Setenv(templatesutil.TemplatesPathEnvVar, templatesPath)
+	defer os.Unsetenv(templatesutil.TemplatesPathEnvVar)
+
+	_, err = GetTemplates(templatesutil.GetTemplateRenderer())
+
+	if err != nil {
+		t.Fatalf("failed to render core template %v", err)
+	}
+}
diff --git a/operators/endpointmetrics/pkg/util/client.go b/operators/endpointmetrics/pkg/util/client.go
new file mode 100644
index 000000000..a3cd89ee5
--- /dev/null
+++ b/operators/endpointmetrics/pkg/util/client.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2021 Red Hat, Inc.
+// Copyright Contributors to the Open Cluster Management project.
+package util
+
+import (
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/clientcmd"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	ocpClientSet "github.com/openshift/client-go/config/clientset/versioned"
+	oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1"
+)
+
+var (
+	hubClient client.Client
+	ocpClient ocpClientSet.Interface
+)
+
+var (
+	log = ctrl.Log.WithName("util")
+)
+
+const (
+	hubKubeConfigPath = "/spoke/hub-kubeconfig/kubeconfig"
+)
+
+// GetOrCreateHubClient gets an existing hub client or creates a new one if it doesn't exist
+func GetOrCreateHubClient() (client.Client, error) {
+	if hubClient != nil {
+		return hubClient, nil
+	}
+	// create the config from the path
+	config, err := clientcmd.BuildConfigFromFlags("", hubKubeConfigPath)
+	if err != nil {
+		log.Error(err, "Failed to create the config")
+		return nil, err
+	}
+
+	s := scheme.Scheme
+	if err := oav1beta1.AddToScheme(s); err != nil {
+		return nil, err
+	}
+
+	// generate the client based off of the config and cache it in the package-level variable
+	hubClient, err = client.New(config, client.Options{Scheme: s})
+
+	if err != nil {
+		log.Error(err, "Failed to create hub client")
+		return nil, err
+	}
+
+	return hubClient, err
+}
+
+// GetOrCreateOCPClient gets an existing OCP client or creates a new one if it doesn't exist
+func GetOrCreateOCPClient() (ocpClientSet.Interface, error) {
+	if ocpClient != nil {
+		return ocpClient, nil
+	}
+	// create the config from the path
+	config, err := clientcmd.BuildConfigFromFlags("", "")
+	if err != nil {
+		log.Error(err, "Failed to create the config")
+		return nil, err
+	}
+
+	// generate the client based off of the config
+	ocpClient, err = ocpClientSet.NewForConfig(config)
+	if err != nil {
+		log.Error(err, "Failed to create ocp config client")
+		return nil, err
+	}
+
+	return ocpClient, err
+}
diff --git a/operators/endpointmetrics/pkg/util/lease.go b/operators/endpointmetrics/pkg/util/lease.go
new file mode 100644
index 000000000..2e2f4d947
--- /dev/null
+++ b/operators/endpointmetrics/pkg/util/lease.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2021 Red Hat, Inc.
+// Copyright Contributors to the Open Cluster Management project.
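+//
+// StartLease (below) wires up the addon-framework lease updater: it checks the
+// endpoint-observability-operator pod through the in-cluster client, keeps the
+// "observability-controller" lease updated in the addon namespace, and is also
+// configured with the mounted hub kubeconfig so the lease is maintained in this
+// cluster's namespace on the hub.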
+ +package util + +import ( + "context" + "os" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + "open-cluster-management.io/addon-framework/pkg/lease" +) + +const ( + leaseName = "observability-controller" +) + +var ( + namespace = os.Getenv("WATCH_NAMESPACE") + clusterName = os.Getenv("HUB_NAMESPACE") +) + +func StartLease() { + config, err := rest.InClusterConfig() + if err != nil { + log.Error(err, "Failed to create incluster config") + panic(err.Error()) + } + // creates the clientset + c, err := kubernetes.NewForConfig(config) + if err != nil { + log.Error(err, "Failed to create kube client") + panic(err.Error()) + } + + // create the config from the path + hubConfig, err := clientcmd.BuildConfigFromFlags("", hubKubeConfigPath) + if err != nil { + log.Error(err, "Failed to create the hub config") + panic(err.Error()) + } + + actual := lease.CheckAddonPodFunc(c.CoreV1(), namespace, "name=endpoint-observability-operator") + leaseController := lease.NewLeaseUpdater(c, leaseName, namespace, actual). + WithHubLeaseConfig(hubConfig, clusterName) + + go leaseController.Start(context.TODO()) +} diff --git a/operators/endpointmetrics/pkg/util/status.go b/operators/endpointmetrics/pkg/util/status.go new file mode 100644 index 000000000..bbaa8b81b --- /dev/null +++ b/operators/endpointmetrics/pkg/util/status.go @@ -0,0 +1,49 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. +package util + +import ( + "context" + "time" + + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + conditions = map[string]map[string]string{ + "Deployed": map[string]string{ + "type": "Progressing", + "reason": "Deployed", + "message": "Metrics collector deployed"}, + "Disabled": map[string]string{ + "type": "Disabled", + "reason": "Disabled", + "message": "enableMetrics is set to False"}, + "Degraded": map[string]string{ + "type": "Degraded", + "reason": "Degraded", + "message": "Metrics collector deployment not successful"}, + "NotSupported": map[string]string{ + "type": "NotSupported", + "reason": "NotSupported", + "message": "No Prometheus service found in this cluster"}, + } +) + +func ReportStatus(ctx context.Context, client client.Client, i *oav1beta1.ObservabilityAddon, t string) { + i.Status.Conditions = []oav1beta1.StatusCondition{ + { + Type: conditions[t]["type"], + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(time.Now()), + Reason: conditions[t]["reason"], + Message: conditions[t]["message"], + }, + } + err := client.Status().Update(ctx, i) + if err != nil { + log.Error(err, "Failed to update status for observabilityaddon") + } +} diff --git a/operators/endpointmetrics/pkg/util/status_test.go b/operators/endpointmetrics/pkg/util/status_test.go new file mode 100644 index 000000000..87ee89e0d --- /dev/null +++ b/operators/endpointmetrics/pkg/util/status_test.go @@ -0,0 +1,70 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
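+//
+// TestReportStatus below calls ReportStatus for three of the condition keys
+// ("NotSupported", "Deployed", "Disabled") and checks that the condition written to
+// the ObservabilityAddon status carries the expected type, reason, and message.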
+package util + +import ( + "context" + "fmt" + "testing" + + oav1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + name = "observability-addon" + testNamespace = "test-ns" +) + +func newObservabilityAddon(name string, ns string) *oav1beta1.ObservabilityAddon { + return &oav1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + } +} + +func TestReportStatus(t *testing.T) { + oa := newObservabilityAddon(name, testNamespace) + objs := []runtime.Object{oa} + s := scheme.Scheme + if err := oav1beta1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add oav1beta1 scheme: (%v)", err) + } + + expectedStatus := []oav1beta1.StatusCondition{ + { + Type: "NotSupported", + Status: metav1.ConditionTrue, + Reason: "NotSupported", + Message: "No Prometheus service found in this cluster", + }, + { + Type: "Progressing", + Status: metav1.ConditionTrue, + Reason: "Deployed", + Message: "Metrics collector deployed", + }, + { + Type: "Disabled", + Status: metav1.ConditionTrue, + Reason: "Disabled", + Message: "enableMetrics is set to False", + }, + } + + statusList := []string{"NotSupported", "Deployed", "Disabled"} + s.AddKnownTypes(oav1beta1.GroupVersion, oa) + c := fake.NewFakeClient(objs...) + for i := range statusList { + ReportStatus(context.TODO(), c, oa, statusList[i]) + if oa.Status.Conditions[0].Message != expectedStatus[i].Message || oa.Status.Conditions[0].Reason != expectedStatus[i].Reason || oa.Status.Conditions[0].Status != expectedStatus[i].Status || oa.Status.Conditions[0].Type != expectedStatus[i].Type { + t.Errorf("Error: Status not updated. Expected: %s, Actual: %s", expectedStatus[i], fmt.Sprintf("%+v\n", oa.Status.Conditions[0])) + } + } + +} diff --git a/operators/endpointmetrics/version/version.go b/operators/endpointmetrics/version/version.go new file mode 100644 index 000000000..07d3b08ce --- /dev/null +++ b/operators/endpointmetrics/version/version.go @@ -0,0 +1,7 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. 
+package version + +var ( + Version = "0.0.1" +) diff --git a/operators/multiclusterobservability/Dockerfile b/operators/multiclusterobservability/Dockerfile new file mode 100644 index 000000000..5f750e6e1 --- /dev/null +++ b/operators/multiclusterobservability/Dockerfile @@ -0,0 +1,62 @@ +# Copyright Contributors to the Open Cluster Management project + +FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder + + +WORKDIR /workspace +COPY go.sum go.mod ./ +COPY ./operators/multiclusterobservability ./operators/multiclusterobservability +COPY ./operators/pkg ./operators/pkg + +RUN CGO_ENABLED=0 go build -a -installsuffix cgo -o bin/manager operators/multiclusterobservability/main.go + +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +ARG VCS_REF +ARG VCS_URL +ARG IMAGE_NAME +ARG IMAGE_DESCRIPTION +ARG IMAGE_DISPLAY_NAME +ARG IMAGE_NAME_ARCH +ARG IMAGE_MAINTAINER +ARG IMAGE_VENDOR +ARG IMAGE_VERSION +ARG IMAGE_RELEASE +ARG IMAGE_SUMMARY +ARG IMAGE_OPENSHIFT_TAGS + +LABEL org.label-schema.vendor="Red Hat" \ + org.label-schema.name="$IMAGE_NAME_ARCH" \ + org.label-schema.description="$IMAGE_DESCRIPTION" \ + org.label-schema.vcs-ref=$VCS_REF \ + org.label-schema.vcs-url=$VCS_URL \ + org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \ + org.label-schema.schema-version="1.0" \ + name="$IMAGE_NAME" \ + maintainer="$IMAGE_MAINTAINER" \ + vendor="$IMAGE_VENDOR" \ + version="$IMAGE_VERSION" \ + release="$IMAGE_RELEASE" \ + description="$IMAGE_DESCRIPTION" \ + summary="$IMAGE_SUMMARY" \ + io.k8s.display-name="$IMAGE_DISPLAY_NAME" \ + io.k8s.description="$IMAGE_DESCRIPTION" \ + io.openshift.tags="$IMAGE_OPENSHIFT_TAGS" + +ENV OPERATOR=/usr/local/bin/mco-operator \ + USER_UID=1001 \ + USER_NAME=mco + +RUN microdnf update -y && microdnf clean all + +# install templates +COPY ./operators/multiclusterobservability/manifests /usr/local/manifests + +# install the prestop script +COPY ./operators/multiclusterobservability/prestop.sh /usr/local/bin/prestop.sh + +# install operator binary +COPY --from=builder /workspace/bin/manager ${OPERATOR} +USER ${USER_UID} + +ENTRYPOINT ["/usr/local/bin/mco-operator"] diff --git a/operators/multiclusterobservability/Makefile b/operators/multiclusterobservability/Makefile new file mode 100644 index 000000000..5c0c5d4e6 --- /dev/null +++ b/operators/multiclusterobservability/Makefile @@ -0,0 +1,158 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +-include /opt/build-harness/Makefile.prow + +# MAKEFILE_DIR is the directory that contains the current makefile +# used to locate some common building files +MAKEFILE_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) +ROOT_DIR ?= $(shell cd ${MAKEFILE_DIR}/../..; pwd) + +# VERSION defines the project version for the bundle. +# Update this value when you upgrade the version of your project. +# To re-generate a bundle for another specific version without changing the standard setup, you can: +# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) +# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) +VERSION ?= 0.1.0 + +# CHANNELS define the bundle channels used in the bundle. +# Add a new line here if you would like to change its default config. 
(E.g CHANNELS = "preview,fast,stable") +# To re-generate a bundle for other specific channels without changing the standard setup, you can: +# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable) +# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable") +ifneq ($(origin CHANNELS), undefined) +BUNDLE_CHANNELS := --channels=$(CHANNELS) +endif + +# DEFAULT_CHANNEL defines the default channel used in the bundle. +# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") +# To re-generate a bundle for any other default channel without changing the default setup, you can: +# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) +# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") +ifneq ($(origin DEFAULT_CHANNEL), undefined) +BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) +endif +BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) + +# BUNDLE_IMG defines the image:tag used for the bundle. +# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) +BUNDLE_IMG ?= controller-bundle:$(VERSION) + +# Image URL to use all building/pushing image targets +IMG ?= quay.io/stolostron/multicluster-observability-operator:latest +# Produce CRDs that work back to Kubernetes 1.11 (no version conversion) +CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +UNAME := $(shell uname) + +ifeq ($(UNAME), Linux) +SED=@sed +endif +ifeq ($(UNAME), Darwin) +# run `brew install gnu-sed` to install gsed +SED=@gsed +endif + +all: manager + +# Run tests +ENVTEST_ASSETS_DIR=$(shell pwd)/testbin +test: generate fmt vet manifests + mkdir -p ${ENVTEST_ASSETS_DIR} + test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.0/hack/setup-envtest.sh + source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out + +# Build manager binary +manager: generate fmt vet + go build -o bin/manager main.go + +# Run against the configured Kubernetes cluster in ~/.kube/config +run: generate fmt vet manifests + go run ./main.go + +# Install CRDs into a cluster +install: manifests kustomize + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +# Uninstall CRDs from a cluster +uninstall: manifests kustomize + $(KUSTOMIZE) build config/crd | kubectl delete -f - + +# Deploy controller in the configured Kubernetes cluster in ~/.kube/config +deploy: manifests kustomize + cd config/manager && $(KUSTOMIZE) edit set image quay.io/stolostron/multicluster-observability-operator=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +# UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config +undeploy: + $(KUSTOMIZE) build config/default | kubectl delete -f - + +# Generate manifests e.g. CRD, RBAC etc. +manifests: controller-gen + $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases + # TODO(morvencao): replace the sed command with kubebuilder marker + $(SED) -i 's/storage: false/&\n deprecated: true/g' config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml + $(SED) -i 's/storage: false/&\n deprecationWarning: observability.open-cluster-management.io\/v1beta1 MultiClusterObservability is deprecated in v2.3+, unavailable in v2.6+; use observability.open-cluster-management.io\/v1beta2 MultiClusterObservability/g' config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... + +# Generate code +generate: controller-gen + $(CONTROLLER_GEN) object:headerFile="$(ROOT_DIR)/hack/boilerplate.go.txt" paths="./..." + +# Download controller-gen locally if necessary +CONTROLLER_GEN = $(shell pwd)/bin/controller-gen +controller-gen: + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) + +# Download kustomize locally if necessary +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: + $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) + +# go-get-tool will 'go get' any package $2 and install it to $1. +PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) +define go-get-tool +@[ -f $(1) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +go mod init tmp ;\ +echo "Downloading $(2)" ;\ +GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ +rm -rf $$TMP_DIR ;\ +} +endef + +.PHONY: build +build: manager + +# Generate bundle manifests and metadata, then validate generated files. +.PHONY: bundle +bundle: manifests kustomize + operator-sdk generate kustomize manifests -q + cd config/manager && $(KUSTOMIZE) edit set image quay.io/stolostron/multicluster-observability-operator=$(IMG) + $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) + #TODO(morvencao): filter the rbac resource with operator-sdk bundle command + rm -f ./bundle/manifests/multicluster-observability-operator-manager-role_rbac.authorization.k8s.io_v1_clusterrole.yaml + rm -f ./bundle/manifests/multicluster-observability-operator-manager-rolebinding_rbac.authorization.k8s.io_v1_clusterrolebinding.yaml + rm -f ./bundle/manifests/multicluster-observability-operator_v1_serviceaccount.yaml + operator-sdk bundle validate ./bundle + +# Build the bundle image. +.PHONY: bundle-build +bundle-build: + docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 
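Editor's note (not part of the original patch): the Makefile above wires together the standard operator-sdk bundle workflow, and its own comments describe how the variables are meant to be overridden. As a rough usage sketch under those assumptions (the quay.io organization below is a placeholder), the targets are typically invoked like this:

    # regenerate CRDs/RBAC and run the unit tests with envtest
    make manifests test

    # build and validate an OLM bundle for a specific version and channel set
    make bundle VERSION=0.1.0 CHANNELS=preview,fast,stable DEFAULT_CHANNEL=stable

    # package the bundle image (BUNDLE_IMG defaults to controller-bundle:$(VERSION))
    make bundle-build BUNDLE_IMG=quay.io/<org>/controller-bundle:0.1.0

    # deploy the operator into the cluster configured in ~/.kube/config
    make deploy IMG=quay.io/stolostron/multicluster-observability-operator:latest
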
diff --git a/operators/multiclusterobservability/OWNERS b/operators/multiclusterobservability/OWNERS new file mode 100644 index 000000000..519e50c83 --- /dev/null +++ b/operators/multiclusterobservability/OWNERS @@ -0,0 +1,9 @@ +approvers: +- clyang82 +- marcolan018 +- morvencao +- songleo + +reviewers: +- haoqing0110 +- bjoydeep diff --git a/operators/multiclusterobservability/PROJECT b/operators/multiclusterobservability/PROJECT new file mode 100644 index 000000000..3d5e1ae23 --- /dev/null +++ b/operators/multiclusterobservability/PROJECT @@ -0,0 +1,18 @@ +domain: open-cluster-management.io +layout: go.kubebuilder.io/v3 +projectName: multicluster-observability-operator +repo: github.com/stolostron/multicluster-observability-operator +resources: +- crdVersion: v1 + group: observability + kind: MultiClusterObservability + version: v1beta2 + webhookVersion: v1 +- crdVersion: v1 + group: observability + kind: ObservabilityAddon + version: v1beta1 +version: 3-alpha +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} diff --git a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go new file mode 100644 index 000000000..9055631c4 --- /dev/null +++ b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go @@ -0,0 +1,95 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +// Package shared contains shared API Schema definitions for the observability API group +// +kubebuilder:object:generate=true +// +groupName=observability.open-cluster-management.io + +package shared + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ObservabilityAddonSpec is the spec of observability addon +type ObservabilityAddonSpec struct { + // EnableMetrics indicates the observability addon push metrics to hub server. + // +optional + // +kubebuilder:default:=true + EnableMetrics bool `json:"enableMetrics"` + + // Interval for the observability addon push metrics to hub server. + // +optional + // +kubebuilder:default:=300 + // +kubebuilder:validation:Minimum=15 + // +kubebuilder:validation:Maximum=3600 + Interval int32 `json:"interval,omitempty"` + + // Resource requirement for metrics-collector + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` +} + +type PreConfiguredStorage struct { + // The key of the secret to select from. Must be a valid secret key. + // Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + // +required + Key string `json:"key"` + // Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +required + Name string `json:"name"` +} + +// Condition is from metav1.Condition. +// Cannot use it directly because the upgrade issue. +// Have to mark LastTransitionTime and Status as optional. +type Condition struct { + // type of condition in CamelCase or in foo.example.com/CamelCase. + // --- + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + // useful (see .node.status.conditions), the ability to deconflict is important. 
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$` + // +kubebuilder:validation:MaxLength=316 + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + // status of the condition, one of True, False, Unknown. + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=True;False;Unknown + Status metav1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` + // observedGeneration represents the .metadata.generation that the condition was set based upon. + // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, + // the condition is out of date + // with respect to the current state of the instance. + // +optional + // +kubebuilder:validation:Minimum=0 + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + // lastTransitionTime is the last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when the API + // field changed is acceptable. + // +optional + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // reason contains a programmatic identifier indicating the reason for the condition's last transition. + // Producers of specific condition types may define expected values and meanings for this field, + // and whether the values are considered a guaranteed API. + // The value should be a CamelCase string. + // This field may not be empty. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$` + Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` + // message is a human readable message indicating details about the transition. + // This may be an empty string. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=32768 + Message string `json:"message" protobuf:"bytes,6,opt,name=message"` +} diff --git a/operators/multiclusterobservability/api/shared/zz_generated.deepcopy.go b/operators/multiclusterobservability/api/shared/zz_generated.deepcopy.go new file mode 100644 index 000000000..000609f11 --- /dev/null +++ b/operators/multiclusterobservability/api/shared/zz_generated.deepcopy.go @@ -0,0 +1,77 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. 
+ +package shared + +import ( + "k8s.io/api/core/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityAddonSpec) DeepCopyInto(out *ObservabilityAddonSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityAddonSpec. +func (in *ObservabilityAddonSpec) DeepCopy() *ObservabilityAddonSpec { + if in == nil { + return nil + } + out := new(ObservabilityAddonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreConfiguredStorage) DeepCopyInto(out *PreConfiguredStorage) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreConfiguredStorage. +func (in *PreConfiguredStorage) DeepCopy() *PreConfiguredStorage { + if in == nil { + return nil + } + out := new(PreConfiguredStorage) + in.DeepCopyInto(out) + return out +} diff --git a/operators/multiclusterobservability/api/v1beta1/groupversion_info.go b/operators/multiclusterobservability/api/v1beta1/groupversion_info.go new file mode 100644 index 000000000..65bd9519a --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta1/groupversion_info.go @@ -0,0 +1,38 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains API Schema definitions for the observability v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=observability.open-cluster-management.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.open-cluster-management.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_conversion.go b/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_conversion.go new file mode 100644 index 000000000..89a758da9 --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_conversion.go @@ -0,0 +1,124 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package v1beta1 + +/* +For imports, we'll need the controller-runtime +[`conversion`](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/conversion) +package, plus the API version for our hub type (v1beta2), and finally some of the +standard packages. +*/ +import ( + "sigs.k8s.io/controller-runtime/pkg/conversion" + + observabilityv1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" +) + +// +kubebuilder:docs-gen:collapse=Imports + +/* +Our "spoke" versions need to implement the +[`Convertible`](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/conversion#Convertible) +interface. Namely, they'll need `ConvertTo` and `ConvertFrom` methods to convert to/from +the hub version. +*/ + +/* +ConvertTo is expected to modify its argument to contain the converted object. +Most of the conversion is straightforward copying, except for converting our changed field. +*/ +// ConvertTo converts this MultiClusterObservability to the Hub version (v1beta2). +func (src *MultiClusterObservability) ConvertTo(dstRaw conversion.Hub) error { + dst := dstRaw.(*observabilityv1beta2.MultiClusterObservability) + + // TODO(morvencao)?: convert the AvailabilityConfig field + // availabilityConfig := src.Spec.AvailabilityConfig + + dst.Spec.StorageConfig = &observabilityv1beta2.StorageConfig{ + MetricObjectStorage: src.Spec.StorageConfig.MetricObjectStorage, + StorageClass: src.Spec.StorageConfig.StatefulSetStorageClass, + // How to convert the current storage size to new one? + AlertmanagerStorageSize: src.Spec.StorageConfig.StatefulSetSize, + RuleStorageSize: src.Spec.StorageConfig.StatefulSetSize, + StoreStorageSize: src.Spec.StorageConfig.StatefulSetSize, + CompactStorageSize: src.Spec.StorageConfig.StatefulSetSize, + ReceiveStorageSize: src.Spec.StorageConfig.StatefulSetSize, + } + + dst.Spec.AdvancedConfig = &observabilityv1beta2.AdvancedConfig{ + RetentionConfig: &observabilityv1beta2.RetentionConfig{ + RetentionResolutionRaw: src.Spec.RetentionResolutionRaw, + RetentionResolution5m: src.Spec.RetentionResolution5m, + RetentionResolution1h: src.Spec.RetentionResolution1h, + }, + } + + dst.Spec.EnableDownsampling = src.Spec.EnableDownSampling + + /* + The rest of the conversion is pretty rote. + */ + // ObjectMeta + dst.ObjectMeta = src.ObjectMeta + + // Spec + dst.Spec.ImagePullPolicy = src.Spec.ImagePullPolicy + dst.Spec.ImagePullSecret = src.Spec.ImagePullSecret + dst.Spec.NodeSelector = src.Spec.NodeSelector + dst.Spec.Tolerations = src.Spec.Tolerations + dst.Spec.ObservabilityAddonSpec = src.Spec.ObservabilityAddonSpec + + // Status + dst.Status.Conditions = src.Status.Conditions + + // +kubebuilder:docs-gen:collapse=rote conversion + return nil +} + +/* +ConvertFrom is expected to modify its receiver to contain the converted object. +Most of the conversion is straightforward copying, except for converting our changed field. +*/ + +// ConvertFrom converts from the Hub version (observabilityv1beta2) to this version. 
+func (dst *MultiClusterObservability) ConvertFrom(srcRaw conversion.Hub) error { + src := srcRaw.(*observabilityv1beta2.MultiClusterObservability) + + // TODO(morvencao): convert the AvailabilityConfig field + // dst.Spec.AvailabilityConfig = + + if src.Spec.AdvancedConfig != nil && src.Spec.AdvancedConfig.RetentionConfig != nil { + dst.Spec.RetentionResolutionRaw = src.Spec.AdvancedConfig.RetentionConfig.RetentionResolutionRaw + dst.Spec.RetentionResolution5m = src.Spec.AdvancedConfig.RetentionConfig.RetentionResolution5m + dst.Spec.RetentionResolution1h = src.Spec.AdvancedConfig.RetentionConfig.RetentionResolution1h + } + + dst.Spec.StorageConfig = &StorageConfigObject{ + MetricObjectStorage: src.Spec.StorageConfig.MetricObjectStorage, + StatefulSetStorageClass: src.Spec.StorageConfig.StorageClass, + // How to convert the new storage size to old one? + // StatefulSetSize = + } + + dst.Spec.EnableDownSampling = src.Spec.EnableDownsampling + + /* + The rest of the conversion is pretty rote. + */ + // ObjectMeta + dst.ObjectMeta = src.ObjectMeta + + // Spec + dst.Spec.ImagePullPolicy = src.Spec.ImagePullPolicy + dst.Spec.ImagePullSecret = src.Spec.ImagePullSecret + dst.Spec.NodeSelector = src.Spec.NodeSelector + dst.Spec.Tolerations = src.Spec.Tolerations + dst.Spec.ObservabilityAddonSpec = src.Spec.ObservabilityAddonSpec + + // Status + dst.Status.Conditions = src.Status.Conditions + + // +kubebuilder:docs-gen:collapse=rote conversion + return nil +} diff --git a/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go b/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go new file mode 100644 index 000000000..f684dd847 --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go @@ -0,0 +1,145 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + observabilityshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// AvailabilityType ... +type AvailabilityType string + +const ( + // HABasic stands up most app subscriptions with a replicaCount of 1 + HABasic AvailabilityType = "Basic" + // HAHigh stands up most app subscriptions with a replicaCount of 2 + HAHigh AvailabilityType = "High" +) + +// MultiClusterObservabilitySpec defines the desired state of MultiClusterObservability +type MultiClusterObservabilitySpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // ReplicaCount for HA support. Does not affect data stores. + // Enabled will toggle HA support. This will provide better support in cases of failover + // but consumes more resources. Options are: Basic and High (default). + // +optional + // +kubebuilder:default:=High + AvailabilityConfig AvailabilityType `json:"availabilityConfig,omitempty"` + + // Enable or disable the downsample. + // The default value is false. + // This is not recommended as querying long time ranges + // without non-downsampled data is not efficient and useful. 
+ // +optional + // +kubebuilder:default:=false + EnableDownSampling bool `json:"enableDownSampling"` + + // Pull policy of the MultiClusterObservability images + // +optional + // +kubebuilder:default:=Always + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + + // Pull secret of the MultiClusterObservability images + // +optional + // +kubebuilder:default:=multiclusterhub-operator-pull-secret + ImagePullSecret string `json:"imagePullSecret,omitempty"` + + // Spec of NodeSelector + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations causes all components to tolerate any taints. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // How long to retain raw samples in a bucket. + // +optional + // +kubebuilder:default:="5d" + RetentionResolutionRaw string `json:"retentionResolutionRaw,omitempty"` + + // How long to retain samples of resolution 1 (5 minutes) in bucket. + // +optional + // +kubebuilder:default:="14d" + RetentionResolution5m string `json:"retentionResolution5m,omitempty"` + + // How long to retain samples of resolution 2 (1 hour) in bucket. + // +optional + // +kubebuilder:default:="30d" + RetentionResolution1h string `json:"retentionResolution1h,omitempty"` + + // Specifies the storage to be used by Observability + // +required + StorageConfig *StorageConfigObject `json:"storageConfigObject,omitempty"` + + // The ObservabilityAddonSpec defines the global settings for all managed + // clusters which have observability add-on enabled. + // +optional + ObservabilityAddonSpec *observabilityshared.ObservabilityAddonSpec `json:"observabilityAddonSpec,omitempty"` +} + +// StorageConfigObject is the spec of object storage. +type StorageConfigObject struct { + // Object store config secret for metrics + // +required + MetricObjectStorage *observabilityshared.PreConfiguredStorage `json:"metricObjectStorage,omitempty"` + // The amount of storage applied to the Observability stateful sets, i.e. + // Thanos store, Rule, compact and receiver. + // +optional + // +kubebuilder:default:="10Gi" + StatefulSetSize string `json:"statefulSetSize,omitempty"` + + // Specify the storageClass Stateful Sets. This storage class will also + // be used for Object Storage if MetricObjectStorage was configured for + // the system to create the storage. + // +optional + // +kubebuilder:default:=gp2 + StatefulSetStorageClass string `json:"statefulSetStorageClass,omitempty"` +} + +// MultiClusterObservabilityStatus defines the observed state of MultiClusterObservability +type MultiClusterObservabilityStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Represents the status of each deployment + // +optional + Conditions []observabilityshared.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// MultiClusterObservability defines the configuration for the Observability installation on +// Hub and Managed Clusters all through this one custom resource. 
+// +kubebuilder:pruning:PreserveUnknownFields +// +kubebuilder:resource:path=multiclusterobservabilities,scope=Cluster,shortName=mco +// +operator-sdk:csv:customresourcedefinitions:displayName="MultiClusterObservability" +type MultiClusterObservability struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MultiClusterObservabilitySpec `json:"spec,omitempty"` + Status MultiClusterObservabilityStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// MultiClusterObservabilityList contains a list of MultiClusterObservability +type MultiClusterObservabilityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MultiClusterObservability `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MultiClusterObservability{}, &MultiClusterObservabilityList{}) +} diff --git a/operators/multiclusterobservability/api/v1beta1/observabilityaddon_types.go b/operators/multiclusterobservability/api/v1beta1/observabilityaddon_types.go new file mode 100644 index 000000000..aed7e07ab --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta1/observabilityaddon_types.go @@ -0,0 +1,57 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + observabilityshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// StatusCondition contains condition information for an observability addon +type StatusCondition struct { + Type string `json:"type"` + Status metav1.ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + Reason string `json:"reason"` + Message string `json:"message"` +} + +// ObservabilityAddonStatus defines the observed state of ObservabilityAddon +type ObservabilityAddonStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + Conditions []StatusCondition `json:"conditions"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ObservabilityAddon is the Schema for the observabilityaddon API +// +kubebuilder:resource:path=observabilityaddons,scope=Namespaced,shortName=oba +// +operator-sdk:csv:customresourcedefinitions:displayName="ObservabilityAddon" +type ObservabilityAddon struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec observabilityshared.ObservabilityAddonSpec `json:"spec,omitempty"` + Status ObservabilityAddonStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ObservabilityAddonList contains a list of ObservabilityAddon +type ObservabilityAddonList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ObservabilityAddon `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ObservabilityAddon{}, &ObservabilityAddonList{}) +} diff --git a/operators/multiclusterobservability/api/v1beta1/zz_generated.deepcopy.go b/operators/multiclusterobservability/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..8ec0c8192 --- /dev/null +++ 
b/operators/multiclusterobservability/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,265 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterObservability) DeepCopyInto(out *MultiClusterObservability) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservability. +func (in *MultiClusterObservability) DeepCopy() *MultiClusterObservability { + if in == nil { + return nil + } + out := new(MultiClusterObservability) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiClusterObservability) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterObservabilityList) DeepCopyInto(out *MultiClusterObservabilityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MultiClusterObservability, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservabilityList. +func (in *MultiClusterObservabilityList) DeepCopy() *MultiClusterObservabilityList { + if in == nil { + return nil + } + out := new(MultiClusterObservabilityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiClusterObservabilityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiClusterObservabilitySpec) DeepCopyInto(out *MultiClusterObservabilitySpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(StorageConfigObject) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityAddonSpec != nil { + in, out := &in.ObservabilityAddonSpec, &out.ObservabilityAddonSpec + *out = new(shared.ObservabilityAddonSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservabilitySpec. +func (in *MultiClusterObservabilitySpec) DeepCopy() *MultiClusterObservabilitySpec { + if in == nil { + return nil + } + out := new(MultiClusterObservabilitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterObservabilityStatus) DeepCopyInto(out *MultiClusterObservabilityStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]shared.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservabilityStatus. +func (in *MultiClusterObservabilityStatus) DeepCopy() *MultiClusterObservabilityStatus { + if in == nil { + return nil + } + out := new(MultiClusterObservabilityStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityAddon) DeepCopyInto(out *ObservabilityAddon) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityAddon. +func (in *ObservabilityAddon) DeepCopy() *ObservabilityAddon { + if in == nil { + return nil + } + out := new(ObservabilityAddon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObservabilityAddon) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityAddonList) DeepCopyInto(out *ObservabilityAddonList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ObservabilityAddon, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityAddonList. 
+func (in *ObservabilityAddonList) DeepCopy() *ObservabilityAddonList { + if in == nil { + return nil + } + out := new(ObservabilityAddonList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObservabilityAddonList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObservabilityAddonStatus) DeepCopyInto(out *ObservabilityAddonStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservabilityAddonStatus. +func (in *ObservabilityAddonStatus) DeepCopy() *ObservabilityAddonStatus { + if in == nil { + return nil + } + out := new(ObservabilityAddonStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusCondition) DeepCopyInto(out *StatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCondition. +func (in *StatusCondition) DeepCopy() *StatusCondition { + if in == nil { + return nil + } + out := new(StatusCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConfigObject) DeepCopyInto(out *StorageConfigObject) { + *out = *in + if in.MetricObjectStorage != nil { + in, out := &in.MetricObjectStorage, &out.MetricObjectStorage + *out = new(shared.PreConfiguredStorage) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfigObject. +func (in *StorageConfigObject) DeepCopy() *StorageConfigObject { + if in == nil { + return nil + } + out := new(StorageConfigObject) + in.DeepCopyInto(out) + return out +} diff --git a/operators/multiclusterobservability/api/v1beta2/groupversion_info.go b/operators/multiclusterobservability/api/v1beta2/groupversion_info.go new file mode 100644 index 000000000..44b4fe03f --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta2/groupversion_info.go @@ -0,0 +1,38 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1beta2 contains API Schema definitions for the observability v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=observability.open-cluster-management.io +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "observability.open-cluster-management.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_conversion.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_conversion.go new file mode 100644 index 000000000..bfbdd197d --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_conversion.go @@ -0,0 +1,14 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package v1beta2 + +/* +Implementing the hub method is pretty easy -- we just have to add an empty +method called `Hub()` to serve as a +[marker](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/conversion#Hub). +We could also just put this inline in our `multiclusterobservability_types.go` file. +*/ + +// Hub marks this type as a conversion hub. +func (*MultiClusterObservability) Hub() {} diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go new file mode 100644 index 000000000..c5213e970 --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go @@ -0,0 +1,223 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + observabilityshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" +) + +// MultiClusterObservabilitySpec defines the desired state of MultiClusterObservability +type MultiClusterObservabilitySpec struct { + // Advanced configurations for observability + // +optional + AdvancedConfig *AdvancedConfig `json:"advanced,omitempty"` + // Enable or disable the downsample. + // +optional + // +kubebuilder:default:=true + EnableDownsampling bool `json:"enableDownsampling"` + // Pull policy of the MultiClusterObservability images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` + // Pull secret of the MultiClusterObservability images + // +optional + ImagePullSecret string `json:"imagePullSecret,omitempty"` + // Spec of NodeSelector + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // Tolerations causes all components to tolerate any taints. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // Specifies the storage to be used by Observability + // +required + StorageConfig *StorageConfig `json:"storageConfig"` + // The ObservabilityAddonSpec defines the global settings for all managed + // clusters which have observability add-on enabled. 
+ // +required + ObservabilityAddonSpec *observabilityshared.ObservabilityAddonSpec `json:"observabilityAddonSpec"` +} + +type AdvancedConfig struct { + // The spec of the data retention configurations + // +optional + RetentionConfig *RetentionConfig `json:"retentionConfig,omitempty"` + // The spec of rbac-query-proxy + // +optional + RBACQueryProxy *CommonSpec `json:"rbacQueryProxy,omitempty"` + // The spec of grafana + // +optional + Grafana *CommonSpec `json:"grafana,omitempty"` + // The spec of alertmanager + // +optional + Alertmanager *CommonSpec `json:"alertmanager,omitempty"` + // Specifies the store memcached + // +optional + StoreMemcached *CacheConfig `json:"storeMemcached,omitempty"` + // Specifies the store memcached + // +optional + QueryFrontendMemcached *CacheConfig `json:"queryFrontendMemcached,omitempty"` + // Spec of observatorium api + // +optional + ObservatoriumAPI *CommonSpec `json:"observatoriumAPI,omitempty"` + // spec for thanos-query-frontend + // +optional + QueryFrontend *CommonSpec `json:"queryFrontend,omitempty"` + // spec for thanos-query + // +optional + Query *CommonSpec `json:"query,omitempty"` + // spec for thanos-compact + // +optional + Compact *CompactSpec `json:"compact,omitempty"` + // spec for thanos-receiver + // +optional + Receive *CommonSpec `json:"receive,omitempty"` + // spec for thanos-rule + // +optional + Rule *RuleSpec `json:"rule,omitempty"` + // spec for thanos-store-shard + // +optional + Store *CommonSpec `json:"store,omitempty"` +} + +type CommonSpec struct { + // Compute Resources required by this component. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // Replicas for this component. + // +optional + Replicas *int32 `json:"replicas,omitempty"` +} + +// Thanos Rule Spec +type RuleSpec struct { + // Evaluation interval + // +optional + EvalInterval string `json:"evalInterval,omitempty"` + + CommonSpec `json:",inline"` +} + +// Thanos Compact Spec +type CompactSpec struct { + // Compute Resources required by the compact. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// CacheConfig is the spec of memcached. +type CacheConfig struct { + // Memory limit of Memcached in megabytes. + // +optional + MemoryLimitMB *int32 `json:"memoryLimitMb,omitempty"` + // Max item size of Memcached (default: 1m, min: 1k, max: 1024m). + // +optional + MaxItemSize string `json:"maxItemSize,omitempty"` + // Max simultaneous connections of Memcached. + // +optional + ConnectionLimit *int32 `json:"connectionLimit,omitempty"` + + CommonSpec `json:",inline"` +} + +// RetentionConfig is the spec of retention configurations. +type RetentionConfig struct { + // How long to retain raw samples in a bucket. + // It applies to --retention.resolution-raw in compact. + // +optional + RetentionResolutionRaw string `json:"retentionResolutionRaw,omitempty"` + // How long to retain samples of resolution 1 (5 minutes) in bucket. + // It applies to --retention.resolution-5m in compact. + // +optional + RetentionResolution5m string `json:"retentionResolution5m,omitempty"` + // How long to retain samples of resolution 2 (1 hour) in bucket. + // It applies to --retention.resolution-1h in compact. + // +optional + RetentionResolution1h string `json:"retentionResolution1h,omitempty"` + // How long to retain raw samples in a local disk. 
It applies to rule/receive: + // --tsdb.retention in receive + // --tsdb.retention in rule + // +optional + RetentionInLocal string `json:"retentionInLocal,omitempty"` + // configure --delete-delay in compact + // Time before a block marked for deletion is deleted from bucket. + // +optional + DeleteDelay string `json:"deleteDelay,omitempty"` + // configure --tsdb.block-duration in rule (Block duration for TSDB block) + // +optional + BlockDuration string `json:"blockDuration,omitempty"` +} + +// StorageConfig is the spec of object storage. +type StorageConfig struct { + // Object store config secret for metrics + // +required + MetricObjectStorage *observabilityshared.PreConfiguredStorage `json:"metricObjectStorage"` + // Specify the storageClass Stateful Sets. This storage class will also + // be used for Object Storage if MetricObjectStorage was configured for + // the system to create the storage. + // +optional + // +kubebuilder:default:=gp2 + StorageClass string `json:"storageClass,omitempty"` + // The amount of storage applied to alertmanager stateful sets, + // +optional + // +kubebuilder:default:="1Gi" + AlertmanagerStorageSize string `json:"alertmanagerStorageSize,omitempty"` + // The amount of storage applied to thanos rule stateful sets, + // +optional + // +kubebuilder:default:="1Gi" + RuleStorageSize string `json:"ruleStorageSize,omitempty"` + // The amount of storage applied to thanos compact stateful sets, + // +optional + // +kubebuilder:default:="100Gi" + CompactStorageSize string `json:"compactStorageSize,omitempty"` + // The amount of storage applied to thanos receive stateful sets, + // +optional + // +kubebuilder:default:="100Gi" + ReceiveStorageSize string `json:"receiveStorageSize,omitempty"` + // The amount of storage applied to thanos store stateful sets, + // +optional + // +kubebuilder:default:="10Gi" + StoreStorageSize string `json:"storeStorageSize,omitempty"` +} + +// MultiClusterObservabilityStatus defines the observed state of MultiClusterObservability +type MultiClusterObservabilityStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + + // Represents the status of each deployment + // +optional + Conditions []observabilityshared.Condition `json:"conditions,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// MultiClusterObservability defines the configuration for the Observability installation on +// Hub and Managed Clusters all through this one custom resource. 
+// +kubebuilder:pruning:PreserveUnknownFields +// +kubebuilder:resource:path=multiclusterobservabilities,scope=Cluster,shortName=mco +// +operator-sdk:csv:customresourcedefinitions:displayName="MultiClusterObservability" +type MultiClusterObservability struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MultiClusterObservabilitySpec `json:"spec,omitempty"` + Status MultiClusterObservabilityStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true +// MultiClusterObservabilityList contains a list of MultiClusterObservability +type MultiClusterObservabilityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []MultiClusterObservability `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MultiClusterObservability{}, &MultiClusterObservabilityList{}) +} diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go new file mode 100644 index 000000000..8b5c54a2d --- /dev/null +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go @@ -0,0 +1,208 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package v1beta2 + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// +kubebuilder:docs-gen:collapse=Go imports + +// log is for logging in this package. +var multiclusterobservabilitylog = logf.Log.WithName("multiclusterobservability-resource") + +var kubeClient kubernetes.Interface + +func (mco *MultiClusterObservability) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(mco). + Complete() +} + +// +kubebuilder:webhook:path=/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability,mutating=false,failurePolicy=fail,sideEffects=None,groups=observability.open-cluster-management.io,resources=multiclusterobservabilities,verbs=create;update,versions=v1beta2,name=vmulticlusterobservability.observability.open-cluster-management.io,admissionReviewVersions={v1,v1beta1} + +var _ webhook.Validator = &MultiClusterObservability{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (mco *MultiClusterObservability) ValidateCreate() error { + multiclusterobservabilitylog.Info("validate create", "name", mco.Name) + return mco.validateMultiClusterObservability(nil) +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (mco *MultiClusterObservability) ValidateUpdate(old runtime.Object) error { + multiclusterobservabilitylog.Info("validate update", "name", mco.Name) + return mco.validateMultiClusterObservability(old) +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (mco *MultiClusterObservability) ValidateDelete() error { + multiclusterobservabilitylog.Info("validate delete", "name", mco.Name) + + // no validation logic upon object delete. 
+ return nil +} + +// validateMultiClusterObservability validates the name and the spec of the MultiClusterObservability CR. +func (mco *MultiClusterObservability) validateMultiClusterObservability(old runtime.Object) error { + var allErrs field.ErrorList + if err := mco.validateMultiClusterObservabilityName(); err != nil { + allErrs = append(allErrs, err) + } + if err := mco.validateMultiClusterObservabilitySpec(); err != nil { + allErrs = append(allErrs, err) + } + + // validate the MultiClusterObservability CR update + if old != nil { + if errlists := mco.validateUpdateMultiClusterObservabilitySpec(old); errlists != nil { + allErrs = append(allErrs, errlists...) + } + } + if len(allErrs) == 0 { + return nil + } + + return apierrors.NewInvalid( + schema.GroupKind{Group: "observability.open-cluster-management.io", Kind: "MultiClusterObservability"}, + mco.Name, allErrs) +} + +// validateMultiClusterObservabilityName validates the name of the MultiClusterObservability CR. +// Validating the length of a string field can be done declaratively by the validation schema. +// But the `ObjectMeta.Name` field is defined in a shared package under the apimachinery repo, +// so we can't declaratively validate it using the validation schema. +func (mco *MultiClusterObservability) validateMultiClusterObservabilityName() *field.Error { + return nil +} + +// validateMultiClusterObservabilitySpec validates the spec of the MultiClusterObservability CR. +// notice that some fields are declaratively validated by OpenAPI schema with `// +kubebuilder:validation` in the type definition. +func (mco *MultiClusterObservability) validateMultiClusterObservabilitySpec() *field.Error { + // The field helpers from the kubernetes API machinery help us return nicely structured validation errors. + return nil +} + +// validateUpdateMultiClusterObservabilitySpec validates the update of the MultiClusterObservability CR. +func (mco *MultiClusterObservability) validateUpdateMultiClusterObservabilitySpec(old runtime.Object) field.ErrorList { + return mco.validateUpdateMultiClusterObservabilityStorageSize(old) +} + +// validateUpdateMultiClusterObservabilityStorageSize validates the update of storage size in the MultiClusterObservability CR. +func (mco *MultiClusterObservability) validateUpdateMultiClusterObservabilityStorageSize(old runtime.Object) field.ErrorList { + var errs field.ErrorList + oldMCO := old.(*MultiClusterObservability) + kubeClient, err := createOrGetKubeClient() + if err != nil { + return append(errs, field.InternalError(nil, err)) + } + + selectedSC, err := getSelectedStorageClassForMultiClusterObservability(kubeClient, oldMCO) + if err != nil { + return append(errs, field.InternalError(nil, err)) + } + + selectedSCAllowResize, err := storageClassAllowVolumeExpansion(kubeClient, selectedSC) + if err != nil { + return append(errs, field.InternalError(nil, err)) + } + + // if the selected storage class is allowed resize, then return with no error + if selectedSCAllowResize { + return nil + } + + mcoOldConfig := oldMCO.Spec.StorageConfig + mcoNewConfig := mco.Spec.StorageConfig + if mcoOldConfig != nil && mcoNewConfig != nil { + storageConfigFieldPath := field.NewPath("spec").Child("storageConfig") + storageForbiddenResize := "is forbidden to update." 
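+		// The selected storage class does not allow volume expansion (checked above), so any
+		// change to a per-component storage size below is rejected rather than left to fail
+		// when the StatefulSet PVCs cannot be resized.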
+		if mcoOldConfig.AlertmanagerStorageSize != mcoNewConfig.AlertmanagerStorageSize {
+			errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("alertmanagerStorageSize"), storageForbiddenResize))
+		}
+		if mcoOldConfig.CompactStorageSize != mcoNewConfig.CompactStorageSize {
+			errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("compactStorageSize"), storageForbiddenResize))
+		}
+		if mcoOldConfig.ReceiveStorageSize != mcoNewConfig.ReceiveStorageSize {
+			errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("receiveStorageSize"), storageForbiddenResize))
+		}
+		if mcoOldConfig.StoreStorageSize != mcoNewConfig.StoreStorageSize {
+			errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("storeStorageSize"), storageForbiddenResize))
+		}
+		if mcoOldConfig.RuleStorageSize != mcoNewConfig.RuleStorageSize {
+			errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("ruleStorageSize"), storageForbiddenResize))
+		}
+		return errs
+	}
+
+	return nil
+}
+
+// createOrGetKubeClient creates or gets the existing kubeClient
+func createOrGetKubeClient() (kubernetes.Interface, error) {
+	if kubeClient != nil {
+		return kubeClient, nil
+	}
+	c, err := kubernetes.NewForConfig(ctrl.GetConfigOrDie())
+	if err != nil {
+		return nil, err
+	}
+	// cache the client in the package-level variable so later calls reuse it
+	kubeClient = c
+	return kubeClient, nil
+}
+
+// getSelectedStorageClassForMultiClusterObservability gets the storage class selected for the MultiClusterObservability CR
+func getSelectedStorageClassForMultiClusterObservability(c kubernetes.Interface, mco *MultiClusterObservability) (string, error) {
+	scInCR := ""
+	if mco.Spec.StorageConfig != nil {
+		scInCR = mco.Spec.StorageConfig.StorageClass
+	}
+
+	scList, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return "", err
+	}
+
+	scMatch := false
+	defaultSC := ""
+	for _, sc := range scList.Items {
+		if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
+			defaultSC = sc.Name
+		}
+		if sc.Name == scInCR {
+			scMatch = true
+		}
+	}
+	expectedSC := defaultSC
+	if scMatch {
+		expectedSC = scInCR
+	}
+
+	return expectedSC, nil
+}
+
+// storageClassAllowVolumeExpansion checks if the storage class allows volume expansion
+func storageClassAllowVolumeExpansion(c kubernetes.Interface, name string) (bool, error) {
+	sc, err := c.StorageV1().StorageClasses().Get(context.TODO(), name, metav1.GetOptions{})
+	if err != nil {
+		return false, err
+	}
+
+	scAllowVolumeExpansion := false
+	// AllowVolumeExpansion may be omitted, in which case it defaults to false
+	if sc.AllowVolumeExpansion != nil {
+		scAllowVolumeExpansion = *sc.AllowVolumeExpansion
+	}
+
+	return scAllowVolumeExpansion, nil
+}
diff --git a/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go b/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..eca667ab7
--- /dev/null
+++ b/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,355 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) { + *out = *in + if in.RetentionConfig != nil { + in, out := &in.RetentionConfig, &out.RetentionConfig + *out = new(RetentionConfig) + **out = **in + } + if in.RBACQueryProxy != nil { + in, out := &in.RBACQueryProxy, &out.RBACQueryProxy + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.Grafana != nil { + in, out := &in.Grafana, &out.Grafana + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.Alertmanager != nil { + in, out := &in.Alertmanager, &out.Alertmanager + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.StoreMemcached != nil { + in, out := &in.StoreMemcached, &out.StoreMemcached + *out = new(CacheConfig) + (*in).DeepCopyInto(*out) + } + if in.QueryFrontendMemcached != nil { + in, out := &in.QueryFrontendMemcached, &out.QueryFrontendMemcached + *out = new(CacheConfig) + (*in).DeepCopyInto(*out) + } + if in.ObservatoriumAPI != nil { + in, out := &in.ObservatoriumAPI, &out.ObservatoriumAPI + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.QueryFrontend != nil { + in, out := &in.QueryFrontend, &out.QueryFrontend + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.Query != nil { + in, out := &in.Query, &out.Query + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.Compact != nil { + in, out := &in.Compact, &out.Compact + *out = new(CompactSpec) + (*in).DeepCopyInto(*out) + } + if in.Receive != nil { + in, out := &in.Receive, &out.Receive + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } + if in.Rule != nil { + in, out := &in.Rule, &out.Rule + *out = new(RuleSpec) + (*in).DeepCopyInto(*out) + } + if in.Store != nil { + in, out := &in.Store, &out.Store + *out = new(CommonSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedConfig. +func (in *AdvancedConfig) DeepCopy() *AdvancedConfig { + if in == nil { + return nil + } + out := new(AdvancedConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheConfig) DeepCopyInto(out *CacheConfig) { + *out = *in + if in.MemoryLimitMB != nil { + in, out := &in.MemoryLimitMB, &out.MemoryLimitMB + *out = new(int32) + **out = **in + } + if in.ConnectionLimit != nil { + in, out := &in.ConnectionLimit, &out.ConnectionLimit + *out = new(int32) + **out = **in + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheConfig. +func (in *CacheConfig) DeepCopy() *CacheConfig { + if in == nil { + return nil + } + out := new(CacheConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec. +func (in *CommonSpec) DeepCopy() *CommonSpec { + if in == nil { + return nil + } + out := new(CommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CompactSpec) DeepCopyInto(out *CompactSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompactSpec. +func (in *CompactSpec) DeepCopy() *CompactSpec { + if in == nil { + return nil + } + out := new(CompactSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterObservability) DeepCopyInto(out *MultiClusterObservability) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservability. +func (in *MultiClusterObservability) DeepCopy() *MultiClusterObservability { + if in == nil { + return nil + } + out := new(MultiClusterObservability) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiClusterObservability) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterObservabilityList) DeepCopyInto(out *MultiClusterObservabilityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MultiClusterObservability, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservabilityList. +func (in *MultiClusterObservabilityList) DeepCopy() *MultiClusterObservabilityList { + if in == nil { + return nil + } + out := new(MultiClusterObservabilityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MultiClusterObservabilityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MultiClusterObservabilitySpec) DeepCopyInto(out *MultiClusterObservabilitySpec) { + *out = *in + if in.AdvancedConfig != nil { + in, out := &in.AdvancedConfig, &out.AdvancedConfig + *out = new(AdvancedConfig) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(StorageConfig) + (*in).DeepCopyInto(*out) + } + if in.ObservabilityAddonSpec != nil { + in, out := &in.ObservabilityAddonSpec, &out.ObservabilityAddonSpec + *out = new(shared.ObservabilityAddonSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservabilitySpec. +func (in *MultiClusterObservabilitySpec) DeepCopy() *MultiClusterObservabilitySpec { + if in == nil { + return nil + } + out := new(MultiClusterObservabilitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultiClusterObservabilityStatus) DeepCopyInto(out *MultiClusterObservabilityStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]shared.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultiClusterObservabilityStatus. +func (in *MultiClusterObservabilityStatus) DeepCopy() *MultiClusterObservabilityStatus { + if in == nil { + return nil + } + out := new(MultiClusterObservabilityStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionConfig) DeepCopyInto(out *RetentionConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionConfig. +func (in *RetentionConfig) DeepCopy() *RetentionConfig { + if in == nil { + return nil + } + out := new(RetentionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleSpec) DeepCopyInto(out *RuleSpec) { + *out = *in + in.CommonSpec.DeepCopyInto(&out.CommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleSpec. +func (in *RuleSpec) DeepCopy() *RuleSpec { + if in == nil { + return nil + } + out := new(RuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConfig) DeepCopyInto(out *StorageConfig) { + *out = *in + if in.MetricObjectStorage != nil { + in, out := &in.MetricObjectStorage, &out.MetricObjectStorage + *out = new(shared.PreConfiguredStorage) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfig. 
+func (in *StorageConfig) DeepCopy() *StorageConfig { + if in == nil { + return nil + } + out := new(StorageConfig) + in.DeepCopyInto(out) + return out +} diff --git a/operators/multiclusterobservability/bundle.Dockerfile b/operators/multiclusterobservability/bundle.Dockerfile new file mode 100644 index 000000000..9a3def2c6 --- /dev/null +++ b/operators/multiclusterobservability/bundle.Dockerfile @@ -0,0 +1,15 @@ +FROM scratch + +LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 +LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ +LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ +LABEL operators.operatorframework.io.bundle.package.v1=multicluster-observability-operator +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.4.2 +LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 +LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ +LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 +COPY bundle/manifests /manifests/ +COPY bundle/metadata /metadata/ +COPY bundle/tests/scorecard /tests/scorecard/ diff --git a/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml b/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml new file mode 100644 index 000000000..97b676551 --- /dev/null +++ b/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml @@ -0,0 +1,1781 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: observatoria.core.observatorium.io +spec: + group: core.observatorium.io + names: + kind: Observatorium + listKind: ObservatoriumList + plural: observatoria + singular: observatorium + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Observatorium is the Schema for the observatoria API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservatoriumSpec defines the desired state of Observatorium + properties: + affinity: + description: Affinity causes all components to be scheduled on nodes with matching rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. 
+ properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + api: + description: API + properties: + image: + description: API image + type: string + rbac: + description: RBAC is an RBAC configuration for the Observatorium API. + properties: + roleBindings: + description: RoleBindings is a slice of Observatorium API role bindings. + items: + description: RBACRoleBinding binds a set of roles to a set of subjects. + properties: + name: + description: Name is the name of the role binding. + type: string + roles: + description: Roles is a list of roles that will be bound. + items: + type: string + type: array + subjects: + description: Subjects is a list of subjects who will be given access to the specified roles. + items: + description: Subject represents a subject to which an RBAC role can be bound. + properties: + kind: + description: SubjectKind is a kind of Observatorium subject. + type: string + name: + type: string + required: + - kind + - name + type: object + type: array + required: + - name + - roles + - subjects + type: object + type: array + roles: + description: Roles is a slice of Observatorium API roles. + items: + description: RBACRole describes a set of permissions to interact with a tenant. + properties: + name: + description: Name is the name of the role. + type: string + permissions: + description: Permissions is a list of permissions that will be granted. + items: + description: Permission is an Observatorium RBAC permission. + type: string + type: array + resources: + description: Resources is a list of resources to which access will be granted. + items: + type: string + type: array + tenants: + description: Tenants is a list of tenants whose resources will be considered. + items: + type: string + type: array + required: + - name + - permissions + - resources + - tenants + type: object + type: array + required: + - roleBindings + - roles + type: object + replicas: + description: Number of API replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + tenants: + description: Tenants is a slice of tenants for the Observatorium API. + items: + description: APITenant represents a tenant in the Observatorium API. + properties: + id: + type: string + mTLS: + description: TenantMTLS represents the mTLS configuration for an Observatorium API tenant. + properties: + caKey: + type: string + configMapName: + type: string + secretName: + type: string + required: + - caKey + type: object + name: + type: string + oidc: + description: TenantOIDC represents the OIDC configuration for an Observatorium API tenant. + properties: + caKey: + type: string + clientID: + type: string + clientSecret: + type: string + configMapName: + type: string + issuerCAPath: + type: string + issuerURL: + type: string + redirectURL: + type: string + usernameClaim: + type: string + required: + - clientID + - issuerURL + type: object + required: + - id + - name + type: object + type: array + tls: + description: TLS configuration for the Observatorium API. + properties: + caKey: + type: string + certKey: + type: string + configMapName: + type: string + keyKey: + type: string + reloadInterval: + type: string + secretName: + type: string + serverName: + type: string + required: + - certKey + - keyKey + - secretName + type: object + version: + description: Version describes the version of API to use. + type: string + required: + - rbac + - tenants + type: object + envVars: + additionalProperties: + type: string + description: EnvVars define the common environment variables. EnvVars apply to thanos compact/receive/rule/store components + type: object + hashrings: + description: Hashrings describes a list of Hashrings + items: + properties: + hashring: + description: Thanos Hashring name + type: string + tenants: + description: Tenants describes a lists of tenants. + items: + type: string + type: array + required: + - hashring + type: object + type: array + loki: + description: Loki + properties: + image: + description: Loki image + type: string + replicas: + additionalProperties: + format: int32 + type: integer + description: Loki replicas per component + type: object + version: + description: Version of Loki image to be deployed + type: string + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. 
If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
+ type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - image + - volumeClaimTemplate + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector causes all components to be scheduled on nodes with matching labels. + type: object + objectStorageConfig: + description: Objest Storage Configuration + properties: + loki: + description: Object Store Config Secret for Loki + properties: + accessKeyIdKey: + description: Object Store Config key for AWS_ACCESS_KEY_ID + type: string + bucketsKey: + description: Object Store Config key for S3_BUCKETS + type: string + endpointKey: + description: Object Store Config key for S3_URL + type: string + regionKey: + description: Object Store Config key for S3_REGION + type: string + secretAccessKeyKey: + description: Object Store Config key for AWS_SECRET_ACCESS_KEY + type: string + secretName: + description: Object Store Config Secret Name + type: string + required: + - secretName + type: object + thanos: + description: Object Store Config Secret for Thanos + properties: + key: + description: Object Store Config key + type: string + name: + description: Object Store Config Secret Name + type: string + required: + - key + - name + type: object + required: + - thanos + type: object + pullSecret: + description: Pull secret used to pull the images. + type: string + securityContext: + description: Security options the pod should run with. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. 
If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + thanos: + description: Thanos Spec + properties: + compact: + description: Thanos CompactSpec + properties: + deleteDelay: + description: Time before a block marked for deletion is deleted from bucket + type: string + enableDownsampling: + description: EnableDownsampling enables downsampling. 
+ type: boolean + replicas: + description: Number of Compact replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retentionResolution1h: + description: RetentionResolutionRaw + type: string + retentionResolution5m: + description: RetentionResolutionRaw + type: string + retentionResolutionRaw: + description: RetentionResolutionRaw + type: string + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - retentionResolution1h + - retentionResolution5m + - retentionResolutionRaw + - volumeClaimTemplate + type: object + image: + description: Thanos image + type: string + query: + description: Query + properties: + lookbackDelta: + description: The maximum lookback duration for retrieving metrics during expression evaluations. + type: string + replicas: + description: Number of Query replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + type: object + queryFrontend: + description: Thanos QueryFrontend + properties: + cache: + description: Memcached spec for QueryFrontend + properties: + connectionLimit: + description: Max simultaneous connections + format: int32 + type: integer + exporterImage: + description: Memcached Prometheus Exporter image + type: string + exporterResources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + exporterVersion: + description: Version of Memcached Prometheus Exporter image to be deployed. + type: string + image: + description: Memcached image + type: string + maxItemSize: + description: 'Max item size (default: 1m, min: 1k, max: 1024m)' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Number of Memcached replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + version: + description: Version of Memcached image to be deployed. + type: string + type: object + replicas: + description: Number of Query Frontend replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + type: object + receiveController: + description: Thanos Receive Controller Spec + properties: + image: + description: Receive Controller image + type: string + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + version: + description: Version describes the version of Thanos receive controller to use. + type: string + type: object + receivers: + description: Thanos ThanosPersistentSpec + properties: + replicas: + description: Number of Receiver replicas. 
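# For the receivers block, a hypothetical instance helps clarify how replicas and
# replicationFactor relate (here, three receivers keeping two copies of every series).
# Field names follow this schema; the numbers, retention and storage class are
# assumptions for illustration only.
#
#   receivers:
#     replicas: 3
#     replicationFactor: 2
#     retention: 24h
#     serviceMonitor: true
#     volumeClaimTemplate:
#       spec:
#         storageClassName: gp2
#         resources:
#           requests:
#             storage: 100Gi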
+ format: int32 + type: integer + replicationFactor: + description: ReplicationFactor defines the number of copies of every time-series + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: How long to retain raw samples on local storage + type: string + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. 
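# The volumeName / volumeMode / storageClassName trio in this PVC template only matters
# when the claim should bind to a pre-provisioned PersistentVolume instead of relying on
# dynamic provisioning. A hypothetical spec pinning the claim to an existing volume
# (following the usual Kubernetes convention of an empty storageClassName) might read:
#
#   volumeClaimTemplate:
#     spec:
#       volumeName: thanos-receive-pv-0
#       volumeMode: Filesystem
#       storageClassName: ""
#       resources:
#         requests:
#           storage: 100Gi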
+ type: string + type: object + required: + - spec + type: object + required: + - volumeClaimTemplate + type: object + rule: + description: Thanos RulerSpec + properties: + alertmanagerConfigFile: + description: AlertmanagerConfigFile + properties: + key: + description: Alertmanager ConfigMap key + type: string + name: + description: Alertmanager ConfigMap Name + type: string + required: + - key + - name + type: object + alertmanagerURLs: + description: AlertmanagerURLs + items: + type: string + type: array + blockDuration: + description: Block duration for TSDB block + type: string + evalInterval: + description: Evaluation interval + type: string + extraVolumeMounts: + description: ExtraVolumeMounts + items: + properties: + key: + description: File name for the mount + type: string + mountPath: + description: Volume mount path in the pod + type: string + name: + description: Resource name for the volume mount source + type: string + type: + description: Voume mount type, configMap or secret + type: string + required: + - key + - mountPath + - name + - type + type: object + type: array + reloaderImage: + description: ReloaderImage is an image of configmap reloader + type: string + reloaderResources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + replicas: + description: Number of Rule replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Block retention time on local disk + type: string + rulesConfig: + description: RulesConfig configures rules from the configmaps + items: + properties: + key: + description: Rule ConfigMap key + type: string + name: + description: Rule ConfigMap Name + type: string + required: + - key + - name + type: object + type: array + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
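# A label selector in this schema combines matchLabels and matchExpressions, and all
# requirements must hold at once. A hypothetical selector restricting binding to
# volumes labelled for the ruler in one zone (labels and zone value are assumptions)
# could look like:
#
#   selector:
#     matchLabels:
#       app: thanos-rule
#     matchExpressions:
#       - key: topology.kubernetes.io/zone
#         operator: In
#         values:
#           - us-east-1a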
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - volumeClaimTemplate + type: object + store: + description: Thanos StoreSpec + properties: + cache: + description: Memcached spec for Store + properties: + connectionLimit: + description: Max simultaneous connections + format: int32 + type: integer + exporterImage: + description: Memcached Prometheus Exporter image + type: string + exporterResources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + exporterVersion: + description: Version of Memcached Prometheus Exporter image to be deployed. + type: string + image: + description: Memcached image + type: string + maxItemSize: + description: 'Max item size (default: 1m, min: 1k, max: 1024m)' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Number of Memcached replicas. 
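# Both queryFrontend.cache and store.cache share this Memcached shape. A sketch with
# assumed sizing (these numbers are illustrative, not defaults taken from this file):
#
#   cache:
#     replicas: 3
#     memoryLimitMb: 1024
#     maxItemSize: 1m
#     connectionLimit: 1024
#     serviceMonitor: true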
+ format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + version: + description: Version of Memcached image to be deployed. + type: string + type: object + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + shards: + format: int32 + type: integer + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. 
If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
+ type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - volumeClaimTemplate + type: object + version: + description: Version of Thanos image to be deployed. + type: string + required: + - compact + - receivers + - rule + - store + type: object + tolerations: + description: Tolerations causes all components to tolerate specified taints. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
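# Tolerations here use the standard Kubernetes shape (key / operator / value / effect).
# For example, to let every Observatorium component schedule onto nodes carrying a
# hypothetical dedicated=observability:NoSchedule taint:
#
#   tolerations:
#     - key: dedicated
#       operator: Equal
#       value: observability
#       effect: NoSchedule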
+ type: string + type: object + type: array + required: + - hashrings + - objectStorageConfig + type: object + status: + description: ObservatoriumStatus defines the observed state of Observatorium + properties: + conditions: + description: Represents the status of Observatorium + items: + properties: + currentStatus: + type: string + lastTransitionTime: + format: date-time + type: string + name: + type: string + required: + - currentStatus + - lastTransitionTime + - name + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/bundle/manifests/manager-config_v1_configmap.yaml b/operators/multiclusterobservability/bundle/manifests/manager-config_v1_configmap.yaml new file mode 100644 index 000000000..8e47b3ecb --- /dev/null +++ b/operators/multiclusterobservability/bundle/manifests/manager-config_v1_configmap.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +data: + controller_manager_config.yaml: | + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + kind: ControllerManagerConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8383 + webhook: + port: 9443 + leaderElection: + leaderElect: true + resourceName: b9d51391.open-cluster-management.io +kind: ConfigMap +metadata: + name: manager-config diff --git a/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml new file mode 100644 index 000000000..4f45c7ecd --- /dev/null +++ b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml @@ -0,0 +1,549 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "observability.open-cluster-management.io/v1beta1", + "kind": "MultiClusterObservability", + "metadata": { + "name": "observability" + }, + "spec": { + "observabilityAddonSpec": {}, + "storageConfigObject": { + "metricObjectStorage": { + "key": "thanos.yaml", + "name": "thanos-object-storage" + } + } + } + }, + { + "apiVersion": "observability.open-cluster-management.io/v1beta1", + "kind": "ObservabilityAddon", + "metadata": { + "name": "observability-addon" + }, + "spec": { + "enableMetrics": true, + "interval": 30 + } + }, + { + "apiVersion": "observability.open-cluster-management.io/v1beta2", + "kind": "MultiClusterObservability", + "metadata": { + "name": "observability" + }, + "spec": { + "observabilityAddonSpec": {}, + "storageConfig": { + "metricObjectStorage": { + "key": "thanos.yaml", + "name": "thanos-object-storage" + } + } + } + } + ] + capabilities: Basic Install + operators.operatorframework.io/builder: operator-sdk-v1.4.2 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io/internal-objects: '["observatoria.core.observatorium.io","observabilityaddons.observability.open-cluster-management.io"]' + name: multicluster-observability-operator.v0.1.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: MultiClusterObservability defines the configuration for the Observability installation on Hub and Managed Clusters all through this one custom resource. 
+ displayName: MultiClusterObservability + kind: MultiClusterObservability + name: multiclusterobservabilities.observability.open-cluster-management.io + version: v1beta1 + - description: MultiClusterObservability defines the configuration for the Observability installation on Hub and Managed Clusters all through this one custom resource. + displayName: MultiClusterObservability + kind: MultiClusterObservability + name: multiclusterobservabilities.observability.open-cluster-management.io + version: v1beta2 + - description: ObservabilityAddon is the Schema for the observabilityaddon API + displayName: ObservabilityAddon + kind: ObservabilityAddon + name: observabilityaddons.observability.open-cluster-management.io + version: v1beta1 + - kind: Observatorium + name: observatoria.core.observatorium.io + version: v1alpha1 + description: The multicluster-observability-operator is a component of ACM observability feature. It is designed to install into Hub Cluster. + displayName: Multicluster Observability Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - "" + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - serviceaccounts + - namespaces + - nodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - watch + - get + - list + - apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resourceNames: + - multicluster-observability-operator + resources: + - deployments/finalizers + verbs: + - update + - apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get + - apiGroups: + - monitor.open-cluster-management.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - observability.open-cluster-management.io + resources: + - '*' + - multiclusterobservabilities + - endpointmonitorings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - '*' + verbs: + - '*' + - apiGroups: + - core.observatorium.io + resources: + - observatoria + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - integreatly.org + resources: + - grafanas + - grafanas/status + - grafanas/finalizers + - grafanadashboards + - grafanadashboards/status + - grafanadatasources + - grafanadatasources/status + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - '*' + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - watch + - apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + - routes/status + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - watch + - create + - apiGroups: + - cluster.open-cluster-management.io + resources: + - manageclusters + verbs: + - get + - list + - watch + - 
apiGroups: + - work.open-cluster-management.io + resources: + - manifestworks + verbs: + - '*' + - apiGroups: + - config.openshift.io + resources: + - '*' + - infrastructures + verbs: + - '*' + - apiGroups: + - operator.openshift.io + resources: + - ingresscontrollers + verbs: + - get + - list + - watch + - apiGroups: + - certmanager.k8s.io + resources: + - '*' + verbs: + - '*' + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - delete + - get + - list + - watch + - create + - update + - patch + - apiGroups: + - addon.open-cluster-management.io + resources: + - clustermanagementaddons + verbs: + - create + - update + - get + - delete + - list + - watch + - apiGroups: + - addon.open-cluster-management.io + resources: + - managedclusteraddons + - managedclusteraddons/status + verbs: + - watch + - create + - update + - delete + - get + - list + - apiGroups: + - migration.k8s.io + resources: + - storageversionmigrations + verbs: + - create + - delete + - get + - list + - update + - watch + - apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + verbs: + - watch + - get + - list + - apiGroups: + - operator.open-cluster-management.io + resources: + - multiclusterhubs + verbs: + - watch + - get + - list + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + - certificatesigningrequests/approval + verbs: + - update + - apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/kube-apiserver-client + - open-cluster-management.io/observability-signer + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - update + - apiGroups: + - certificates.k8s.io + resourceNames: + - open-cluster-management.io/observability-signer + resources: + - signers + verbs: + - sign + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - imageregistry.open-cluster-management.io + resources: + - managedclusterimageregistries + verbs: + - get + - list + - watch + serviceAccountName: multicluster-observability-operator + deployments: + - name: multicluster-observability-operator + spec: + replicas: 1 + selector: + matchLabels: + name: multicluster-observability-operator + strategy: {} + template: + metadata: + labels: + name: multicluster-observability-operator + spec: + containers: + - args: + - -leader-elect + command: + - mco-operator + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_NAME + value: multicluster-observability-operator + - name: TEMPLATES_PATH + value: /usr/local/manifests + - name: SPOKE_NAMESPACE + value: open-cluster-management-addon-observability + image: quay.io/stolostron/multicluster-observability-operator:latest + imagePullPolicy: Always + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - /usr/local/bin/prestop.sh + livenessProbe: + httpGet: + path: /healthz 
+ port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: multicluster-observability-operator + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8383 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 600m + memory: 1Gi + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + serviceAccountName: multicluster-observability-operator + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: multicluster-observability-operator-webhook-server-cert + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - MultiClusterObservability + links: + - name: Multicluster Observability Operator + url: https://multicluster-observability-operator.domain + maintainers: + - email: acm-contact@redhat.com + name: acm-contact + maturity: alpha + provider: + name: Red Hat, Inc + url: https://github.com/stolostron/multicluster-observability-operator + version: 0.1.0 + webhookdefinitions: + - admissionReviewVersions: + - v1 + - v1beta1 + containerPort: 443 + deploymentName: multicluster-observability-operator + failurePolicy: Fail + generateName: vmulticlusterobservability.observability.open-cluster-management.io + rules: + - apiGroups: + - observability.open-cluster-management.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - multiclusterobservabilities + sideEffects: None + targetPort: 9443 + type: ValidatingAdmissionWebhook + webhookPath: /validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability diff --git a/operators/multiclusterobservability/bundle/manifests/multicluster-observability-webhook-service_v1_service.yaml b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-webhook-service_v1_service.yaml new file mode 100644 index 000000000..769a15805 --- /dev/null +++ b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-webhook-service_v1_service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.openshift.io/serving-cert-secret-name: multicluster-observability-operator-webhook-server-cert + creationTimestamp: null + labels: + name: multicluster-observability-operator + name: multicluster-observability-webhook-service +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + name: multicluster-observability-operator +status: + loadBalancer: {} diff --git a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml new file mode 100644 index 000000000..cb8768619 --- /dev/null +++ b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -0,0 +1,824 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + service.beta.openshift.io/inject-cabundle: "true" + creationTimestamp: null + name: 
multiclusterobservabilities.observability.open-cluster-management.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + caBundle: XG4= + service: + name: multicluster-observability-webhook-service + namespace: open-cluster-management + path: /convert + port: 443 + conversionReviewVersions: + - v1 + - v1beta1 + group: observability.open-cluster-management.io + names: + kind: MultiClusterObservability + listKind: MultiClusterObservabilityList + plural: multiclusterobservabilities + shortNames: + - mco + singular: multiclusterobservability + scope: Cluster + versions: + - deprecated: true + deprecationWarning: observability.open-cluster-management.io/v1beta1 MultiClusterObservability is deprecated in v2.3+, unavailable in v2.6+; use observability.open-cluster-management.io/v1beta2 MultiClusterObservability + name: v1beta1 + schema: + openAPIV3Schema: + description: MultiClusterObservability defines the configuration for the Observability installation on Hub and Managed Clusters all through this one custom resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MultiClusterObservabilitySpec defines the desired state of MultiClusterObservability + properties: + availabilityConfig: + default: High + description: 'ReplicaCount for HA support. Does not affect data stores. Enabled will toggle HA support. This will provide better support in cases of failover but consumes more resources. Options are: Basic and High (default).' + type: string + enableDownSampling: + default: false + description: Enable or disable the downsample. The default value is false. This is not recommended as querying long time ranges without non-downsampled data is not efficient and useful. + type: boolean + imagePullPolicy: + default: Always + description: Pull policy of the MultiClusterObservability images + type: string + imagePullSecret: + default: multiclusterhub-operator-pull-secret + description: Pull secret of the MultiClusterObservability images + type: string + nodeSelector: + additionalProperties: + type: string + description: Spec of NodeSelector + type: object + observabilityAddonSpec: + description: The ObservabilityAddonSpec defines the global settings for all managed clusters which have observability add-on enabled. + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics to hub server. 
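# The addon settings above accept an enable flag, a push interval and collector
# resources. A sketch that keeps metrics enabled but uses a shorter interval of 60
# (within the 15 to 3600 bounds the schema enforces); the resource figures are an
# illustrative assumption:
#
#   observabilityAddonSpec:
#     enableMetrics: true
#     interval: 60
#     resources:
#       requests:
#         cpu: 100m
#         memory: 100Mi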
+ format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + retentionResolution1h: + default: 30d + description: How long to retain samples of resolution 2 (1 hour) in bucket. + type: string + retentionResolution5m: + default: 14d + description: How long to retain samples of resolution 1 (5 minutes) in bucket. + type: string + retentionResolutionRaw: + default: 5d + description: How long to retain raw samples in a bucket. + type: string + storageConfigObject: + description: Specifies the storage to be used by Observability + properties: + metricObjectStorage: + description: Object store config secret for metrics + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - key + - name + type: object + statefulSetSize: + default: 10Gi + description: The amount of storage applied to the Observability stateful sets, i.e. Thanos store, Rule, compact and receiver. + type: string + statefulSetStorageClass: + default: gp2 + description: "\tSpecify the storageClass Stateful Sets. This storage class will also be used for Object Storage if MetricObjectStorage was configured for the system to create the storage." + type: string + type: object + tolerations: + description: Tolerations causes all components to tolerate any taints. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. 
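# Pulling the v1beta1 spec fields above together, a fuller (still hypothetical) CR
# could combine the retention defaults, the object-storage secret reference and a
# toleration; the secret name matches the alm-examples earlier in this bundle, while
# the taint is an assumption for illustration:
#
#   apiVersion: observability.open-cluster-management.io/v1beta1
#   kind: MultiClusterObservability
#   metadata:
#     name: observability
#   spec:
#     retentionResolutionRaw: 5d
#     retentionResolution5m: 14d
#     retentionResolution1h: 30d
#     storageConfigObject:
#       metricObjectStorage:
#         name: thanos-object-storage
#         key: thanos.yaml
#       statefulSetSize: 10Gi
#       statefulSetStorageClass: gp2
#     observabilityAddonSpec: {}
#     tolerations:
#       - key: node-role.kubernetes.io/infra
#         operator: Exists
#         effect: NoSchedule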
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + status: + description: MultiClusterObservabilityStatus defines the observed state of MultiClusterObservability + properties: + conditions: + description: Represents the status of each deployment + items: + description: Condition is from metav1.Condition. Cannot use it directly because the upgrade issue. Have to mark LastTransitionTime and Status as optional. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - message + - reason + - type + type: object + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: false + subresources: + status: {} + - name: v1beta2 + schema: + openAPIV3Schema: + description: MultiClusterObservability defines the configuration for the Observability installation on Hub and Managed Clusters all through this one custom resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MultiClusterObservabilitySpec defines the desired state of MultiClusterObservability + properties: + advanced: + description: Advanced configurations for observability + properties: + alertmanager: + description: The spec of alertmanager + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + compact: + description: spec for thanos-compact + properties: + resources: + description: Compute Resources required by the compact. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + grafana: + description: The spec of grafana + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + observatoriumAPI: + description: Spec of observatorium api + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + query: + description: spec for thanos-query + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + queryFrontend: + description: spec for thanos-query-frontend + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + queryFrontendMemcached: + description: Specifies the store memcached + properties: + connectionLimit: + description: Max simultaneous connections of Memcached. + format: int32 + type: integer + maxItemSize: + description: 'Max item size of Memcached (default: 1m, min: 1k, max: 1024m).' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + rbacQueryProxy: + description: The spec of rbac-query-proxy + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + receive: + description: spec for thanos-receiver + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + retentionConfig: + description: The spec of the data retention configurations + properties: + blockDuration: + description: configure --tsdb.block-duration in rule (Block duration for TSDB block) + type: string + deleteDelay: + description: configure --delete-delay in compact Time before a block marked for deletion is deleted from bucket. + type: string + retentionInLocal: + description: 'How long to retain raw samples in a local disk. It applies to rule/receive: --tsdb.retention in receive --tsdb.retention in rule' + type: string + retentionResolution1h: + description: How long to retain samples of resolution 2 (1 hour) in bucket. It applies to --retention.resolution-1h in compact. + type: string + retentionResolution5m: + description: How long to retain samples of resolution 1 (5 minutes) in bucket. It applies to --retention.resolution-5m in compact. + type: string + retentionResolutionRaw: + description: How long to retain raw samples in a bucket. It applies to --retention.resolution-raw in compact. + type: string + type: object + rule: + description: spec for thanos-rule + properties: + evalInterval: + description: Evaluation interval + type: string + replicas: + description: Replicas for this component. 
+ format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + store: + description: spec for thanos-store-shard + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + storeMemcached: + description: Specifies the store memcached + properties: + connectionLimit: + description: Max simultaneous connections of Memcached. + format: int32 + type: integer + maxItemSize: + description: 'Max item size of Memcached (default: 1m, min: 1k, max: 1024m).' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + enableDownsampling: + default: true + description: Enable or disable the downsample. + type: boolean + imagePullPolicy: + description: Pull policy of the MultiClusterObservability images + type: string + imagePullSecret: + description: Pull secret of the MultiClusterObservability images + type: string + nodeSelector: + additionalProperties: + type: string + description: Spec of NodeSelector + type: object + observabilityAddonSpec: + description: The ObservabilityAddonSpec defines the global settings for all managed clusters which have observability add-on enabled. + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics to hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + storageConfig: + description: Specifies the storage to be used by Observability + properties: + alertmanagerStorageSize: + default: 1Gi + description: The amount of storage applied to alertmanager stateful sets, + type: string + compactStorageSize: + default: 100Gi + description: The amount of storage applied to thanos compact stateful sets, + type: string + metricObjectStorage: + description: Object store config secret for metrics + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - key + - name + type: object + receiveStorageSize: + default: 100Gi + description: The amount of storage applied to thanos receive stateful sets, + type: string + ruleStorageSize: + default: 1Gi + description: The amount of storage applied to thanos rule stateful sets, + type: string + storageClass: + default: gp2 + description: Specify the storageClass Stateful Sets. This storage class will also be used for Object Storage if MetricObjectStorage was configured for the system to create the storage. + type: string + storeStorageSize: + default: 10Gi + description: The amount of storage applied to thanos store stateful sets, + type: string + required: + - metricObjectStorage + type: object + tolerations: + description: Tolerations causes all components to tolerate any taints. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - observabilityAddonSpec + - storageConfig + type: object + status: + description: MultiClusterObservabilityStatus defines the observed state of MultiClusterObservability + properties: + conditions: + description: Represents the status of each deployment + items: + description: Condition is from metav1.Condition. Cannot use it directly because the upgrade issue. Have to mark LastTransitionTime and Status as optional. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - message + - reason + - type + type: object + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_observabilityaddons.yaml b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_observabilityaddons.yaml new file mode 100644 index 000000000..9a43a4079 --- /dev/null +++ b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_observabilityaddons.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: observabilityaddons.observability.open-cluster-management.io +spec: + group: observability.open-cluster-management.io + names: + kind: ObservabilityAddon + listKind: ObservabilityAddonList + plural: observabilityaddons + shortNames: + - oba + singular: observabilityaddon + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ObservabilityAddon is the Schema for the observabilityaddon API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservabilityAddonSpec is the spec of observability addon + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics to hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + status: + description: ObservabilityAddonStatus defines the observed state of ObservabilityAddon + properties: + conditions: + items: + description: StatusCondition contains condition information for an observability addon + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/bundle/metadata/annotations.yaml b/operators/multiclusterobservability/bundle/metadata/annotations.yaml new file mode 100644 index 000000000..12a210b30 --- /dev/null +++ b/operators/multiclusterobservability/bundle/metadata/annotations.yaml @@ -0,0 +1,11 @@ +annotations: + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: multicluster-observability-operator + operators.operatorframework.io.metrics.builder: operator-sdk-v1.4.2 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.test.config.v1: tests/scorecard/ + operators.operatorframework.io.test.mediatype.v1: scorecard+v1 diff --git a/operators/multiclusterobservability/bundle/tests/scorecard/config.yaml b/operators/multiclusterobservability/bundle/tests/scorecard/config.yaml new file mode 
100644 index 000000000..7e98e9c0c --- /dev/null +++ b/operators/multiclusterobservability/bundle/tests/scorecard/config.yaml @@ -0,0 +1,49 @@ +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: + - entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.4.2 + labels: + suite: basic + test: basic-check-spec-test + - entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.4.2 + labels: + suite: olm + test: olm-bundle-validation-test + - entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.4.2 + labels: + suite: olm + test: olm-crds-have-validation-test + - entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.4.2 + labels: + suite: olm + test: olm-crds-have-resources-test + - entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.2 + labels: + suite: olm + test: olm-spec-descriptors-test + - entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.4.2 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml b/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml new file mode 100644 index 000000000..fb6c031e0 --- /dev/null +++ b/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml @@ -0,0 +1,1783 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.5.0 + creationTimestamp: null + name: observatoria.core.observatorium.io +spec: + group: core.observatorium.io + names: + kind: Observatorium + listKind: ObservatoriumList + plural: observatoria + singular: observatorium + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Observatorium is the Schema for the observatoria API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservatoriumSpec defines the desired state of Observatorium + properties: + affinity: + description: Affinity causes all components to be scheduled on nodes with matching rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + api: + description: API + properties: + image: + description: API image + type: string + rbac: + description: RBAC is an RBAC configuration for the Observatorium API. + properties: + roleBindings: + description: RoleBindings is a slice of Observatorium API role bindings. + items: + description: RBACRoleBinding binds a set of roles to a set of subjects. + properties: + name: + description: Name is the name of the role binding. + type: string + roles: + description: Roles is a list of roles that will be bound. + items: + type: string + type: array + subjects: + description: Subjects is a list of subjects who will be given access to the specified roles. + items: + description: Subject represents a subject to which an RBAC role can be bound. + properties: + kind: + description: SubjectKind is a kind of Observatorium subject. + type: string + name: + type: string + required: + - kind + - name + type: object + type: array + required: + - name + - roles + - subjects + type: object + type: array + roles: + description: Roles is a slice of Observatorium API roles. + items: + description: RBACRole describes a set of permissions to interact with a tenant. + properties: + name: + description: Name is the name of the role. + type: string + permissions: + description: Permissions is a list of permissions that will be granted. + items: + description: Permission is an Observatorium RBAC permission. + type: string + type: array + resources: + description: Resources is a list of resources to which access will be granted. + items: + type: string + type: array + tenants: + description: Tenants is a list of tenants whose resources will be considered. + items: + type: string + type: array + required: + - name + - permissions + - resources + - tenants + type: object + type: array + required: + - roleBindings + - roles + type: object + replicas: + description: Number of API replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + tenants: + description: Tenants is a slice of tenants for the Observatorium API. + items: + description: APITenant represents a tenant in the Observatorium API. + properties: + id: + type: string + mTLS: + description: TenantMTLS represents the mTLS configuration for an Observatorium API tenant. + properties: + caKey: + type: string + configMapName: + type: string + secretName: + type: string + required: + - caKey + type: object + name: + type: string + oidc: + description: TenantOIDC represents the OIDC configuration for an Observatorium API tenant. + properties: + caKey: + type: string + clientID: + type: string + clientSecret: + type: string + configMapName: + type: string + issuerCAPath: + type: string + issuerURL: + type: string + redirectURL: + type: string + usernameClaim: + type: string + required: + - clientID + - issuerURL + type: object + required: + - id + - name + type: object + type: array + tls: + description: TLS configuration for the Observatorium API. + properties: + caKey: + type: string + certKey: + type: string + configMapName: + type: string + keyKey: + type: string + reloadInterval: + type: string + secretName: + type: string + serverName: + type: string + required: + - certKey + - keyKey + - secretName + type: object + version: + description: Version describes the version of API to use. + type: string + required: + - rbac + - tenants + type: object + envVars: + additionalProperties: + type: string + description: EnvVars define the common environment variables. EnvVars apply to thanos compact/receive/rule/store components + type: object + hashrings: + description: Hashrings describes a list of Hashrings + items: + properties: + hashring: + description: Thanos Hashring name + type: string + tenants: + description: Tenants describes a lists of tenants. + items: + type: string + type: array + required: + - hashring + type: object + type: array + loki: + description: Loki + properties: + image: + description: Loki image + type: string + replicas: + additionalProperties: + format: int32 + type: integer + description: Loki replicas per component + type: object + version: + description: Version of Loki image to be deployed + type: string + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. 
If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
+ type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - image + - volumeClaimTemplate + type: object + nodeSelector: + additionalProperties: + type: string + description: NodeSelector causes all components to be scheduled on nodes with matching labels. + type: object + objectStorageConfig: + description: Objest Storage Configuration + properties: + loki: + description: Object Store Config Secret for Loki + properties: + accessKeyIdKey: + description: Object Store Config key for AWS_ACCESS_KEY_ID + type: string + bucketsKey: + description: Object Store Config key for S3_BUCKETS + type: string + endpointKey: + description: Object Store Config key for S3_URL + type: string + regionKey: + description: Object Store Config key for S3_REGION + type: string + secretAccessKeyKey: + description: Object Store Config key for AWS_SECRET_ACCESS_KEY + type: string + secretName: + description: Object Store Config Secret Name + type: string + required: + - secretName + type: object + thanos: + description: Object Store Config Secret for Thanos + properties: + key: + description: Object Store Config key + type: string + name: + description: Object Store Config Secret Name + type: string + required: + - key + - name + type: object + required: + - thanos + type: object + pullSecret: + description: Pull secret used to pull the images. + type: string + securityContext: + description: Security options the pod should run with. + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. 
If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + thanos: + description: Thanos Spec + properties: + compact: + description: Thanos CompactSpec + properties: + deleteDelay: + description: Time before a block marked for deletion is deleted from bucket + type: string + enableDownsampling: + description: EnableDownsampling enables downsampling. 
+ type: boolean + replicas: + description: Number of Compact replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retentionResolution1h: + description: RetentionResolutionRaw + type: string + retentionResolution5m: + description: RetentionResolutionRaw + type: string + retentionResolutionRaw: + description: RetentionResolutionRaw + type: string + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - retentionResolution1h + - retentionResolution5m + - retentionResolutionRaw + - volumeClaimTemplate + type: object + image: + description: Thanos image + type: string + query: + description: Query + properties: + lookbackDelta: + description: The maximum lookback duration for retrieving metrics during expression evaluations. + type: string + replicas: + description: Number of Query replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + type: object + queryFrontend: + description: Thanos QueryFrontend + properties: + cache: + description: Memcached spec for QueryFrontend + properties: + connectionLimit: + description: Max simultaneous connections + format: int32 + type: integer + exporterImage: + description: Memcached Prometheus Exporter image + type: string + exporterResources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + exporterVersion: + description: Version of Memcached Prometheus Exporter image to be deployed. + type: string + image: + description: Memcached image + type: string + maxItemSize: + description: 'Max item size (default: 1m, min: 1k, max: 1024m)' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Number of Memcached replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + version: + description: Version of Memcached image to be deployed. + type: string + type: object + replicas: + description: Number of Query Frontend replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + type: object + receiveController: + description: Thanos Receive Controller Spec + properties: + image: + description: Receive Controller image + type: string + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + version: + description: Version describes the version of Thanos receive controller to use. + type: string + type: object + receivers: + description: Thanos ThanosPersistentSpec + properties: + replicas: + description: Number of Receiver replicas. 
+ format: int32 + type: integer + replicationFactor: + description: ReplicationFactor defines the number of copies of every time-series + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: How long to retain raw samples on local storage + type: string + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + required: + - volumeClaimTemplate + type: object + rule: + description: Thanos RulerSpec + properties: + alertmanagerConfigFile: + description: AlertmanagerConfigFile + properties: + key: + description: Alertmanager ConfigMap key + type: string + name: + description: Alertmanager ConfigMap Name + type: string + required: + - key + - name + type: object + alertmanagerURLs: + description: AlertmanagerURLs + items: + type: string + type: array + blockDuration: + description: Block duration for TSDB block + type: string + evalInterval: + description: Evaluation interval + type: string + extraVolumeMounts: + description: ExtraVolumeMounts + items: + properties: + key: + description: File name for the mount + type: string + mountPath: + description: Volume mount path in the pod + type: string + name: + description: Resource name for the volume mount source + type: string + type: + description: Voume mount type, configMap or secret + type: string + required: + - key + - mountPath + - name + - type + type: object + type: array + reloaderImage: + description: ReloaderImage is an image of configmap reloader + type: string + reloaderResources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + replicas: + description: Number of Rule replicas. + format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Block retention time on local disk + type: string + rulesConfig: + description: RulesConfig configures rules from the configmaps + items: + properties: + key: + description: Rule ConfigMap key + type: string + name: + description: Rule ConfigMap Name + type: string + required: + - key + - name + type: object + type: array + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - volumeClaimTemplate + type: object + store: + description: Thanos StoreSpec + properties: + cache: + description: Memcached spec for Store + properties: + connectionLimit: + description: Max simultaneous connections + format: int32 + type: integer + exporterImage: + description: Memcached Prometheus Exporter image + type: string + exporterResources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + exporterVersion: + description: Version of Memcached Prometheus Exporter image to be deployed. + type: string + image: + description: Memcached image + type: string + maxItemSize: + description: 'Max item size (default: 1m, min: 1k, max: 1024m)' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Number of Memcached replicas. 
+ format: int32 + type: integer + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + version: + description: Version of Memcached image to be deployed. + type: string + type: object + resources: + description: Compute Resources required by this container. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + serviceMonitor: + description: ServiceMonitor enables servicemonitor. + type: boolean + shards: + format: int32 + type: integer + volumeClaimTemplate: + description: VolumeClaimTemplate + properties: + spec: + description: PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. 
If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
+ type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + required: + - volumeClaimTemplate + type: object + version: + description: Version of Thanos image to be deployed. + type: string + required: + - compact + - receivers + - rule + - store + type: object + tolerations: + description: Tolerations causes all components to tolerate specified taints. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + required: + - hashrings + - objectStorageConfig + type: object + status: + description: ObservatoriumStatus defines the observed state of Observatorium + properties: + conditions: + description: Represents the status of Observatorium + items: + properties: + currentStatus: + type: string + lastTransitionTime: + format: date-time + type: string + name: + type: string + required: + - currentStatus + - lastTransitionTime + - name + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml new file mode 100644 index 000000000..b51152836 --- /dev/null +++ b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -0,0 +1,1004 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: multiclusterobservabilities.observability.open-cluster-management.io +spec: + group: observability.open-cluster-management.io + names: + kind: MultiClusterObservability + listKind: MultiClusterObservabilityList + plural: multiclusterobservabilities + shortNames: + - mco + singular: multiclusterobservability + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: MultiClusterObservability defines the configuration for the Observability + installation on Hub and Managed Clusters all through this one custom resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MultiClusterObservabilitySpec defines the desired state of + MultiClusterObservability + properties: + availabilityConfig: + default: High + description: 'ReplicaCount for HA support. Does not affect data stores. + Enabled will toggle HA support. This will provide better support + in cases of failover but consumes more resources. Options are: Basic + and High (default).' + type: string + enableDownSampling: + default: false + description: Enable or disable the downsample. The default value is + false. This is not recommended as querying long time ranges without + non-downsampled data is not efficient and useful. 
+ type: boolean + imagePullPolicy: + default: Always + description: Pull policy of the MultiClusterObservability images + type: string + imagePullSecret: + default: multiclusterhub-operator-pull-secret + description: Pull secret of the MultiClusterObservability images + type: string + nodeSelector: + additionalProperties: + type: string + description: Spec of NodeSelector + type: object + observabilityAddonSpec: + description: The ObservabilityAddonSpec defines the global settings + for all managed clusters which have observability add-on enabled. + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push + metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics + to hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + retentionResolution1h: + default: 30d + description: How long to retain samples of resolution 2 (1 hour) in + bucket. + type: string + retentionResolution5m: + default: 14d + description: How long to retain samples of resolution 1 (5 minutes) + in bucket. + type: string + retentionResolutionRaw: + default: 5d + description: How long to retain raw samples in a bucket. + type: string + storageConfigObject: + description: Specifies the storage to be used by Observability + properties: + metricObjectStorage: + description: Object store config secret for metrics + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. Refer to https://thanos.io/storage.md/#configuration + for a valid content of key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - key + - name + type: object + statefulSetSize: + default: 10Gi + description: The amount of storage applied to the Observability + stateful sets, i.e. Thanos store, Rule, compact and receiver. + type: string + statefulSetStorageClass: + default: gp2 + description: "\tSpecify the storageClass Stateful Sets. This storage + class will also be used for Object Storage if MetricObjectStorage + was configured for the system to create the storage." + type: string + type: object + tolerations: + description: Tolerations causes all components to tolerate any taints. 
+ items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + type: object + status: + description: MultiClusterObservabilityStatus defines the observed state + of MultiClusterObservability + properties: + conditions: + description: Represents the status of each deployment + items: + description: Condition is from metav1.Condition. Cannot use it directly + because the upgrade issue. Have to mark LastTransitionTime and + Status as optional. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - message + - reason + - type + type: object + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: false + deprecationWarning: observability.open-cluster-management.io/v1beta1 MultiClusterObservability is deprecated in v2.3+, unavailable in v2.6+; use observability.open-cluster-management.io/v1beta2 MultiClusterObservability + deprecated: true + subresources: + status: {} + - name: v1beta2 + schema: + openAPIV3Schema: + description: MultiClusterObservability defines the configuration for the Observability + installation on Hub and Managed Clusters all through this one custom resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MultiClusterObservabilitySpec defines the desired state of + MultiClusterObservability + properties: + advanced: + description: Advanced configurations for observability + properties: + alertmanager: + description: The spec of alertmanager + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + compact: + description: spec for thanos-compact + properties: + resources: + description: Compute Resources required by the compact. 
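+ # Illustrative only: an advanced-section override for thanos-compact resources
+ # might look like the following (values are examples, not operator defaults):
+ #   compact:
+ #     resources:
+ #       requests:
+ #         cpu: 100m
+ #         memory: 512Mi
+ #       limits:
+ #         memory: 1Gi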
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + grafana: + description: The spec of grafana + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + observatoriumAPI: + description: Spec of observatorium api + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + query: + description: spec for thanos-query + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + queryFrontend: + description: spec for thanos-query-frontend + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + queryFrontendMemcached: + description: Specifies the store memcached + properties: + connectionLimit: + description: Max simultaneous connections of Memcached. + format: int32 + type: integer + maxItemSize: + description: 'Max item size of Memcached (default: 1m, min: + 1k, max: 1024m).' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + rbacQueryProxy: + description: The spec of rbac-query-proxy + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + receive: + description: spec for thanos-receiver + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + retentionConfig: + description: The spec of the data retention configurations + properties: + blockDuration: + description: configure --tsdb.block-duration in rule (Block + duration for TSDB block) + type: string + deleteDelay: + description: configure --delete-delay in compact Time before + a block marked for deletion is deleted from bucket. + type: string + retentionInLocal: + description: 'How long to retain raw samples in a local disk. + It applies to rule/receive: --tsdb.retention in receive + --tsdb.retention in rule' + type: string + retentionResolution1h: + description: How long to retain samples of resolution 2 (1 + hour) in bucket. It applies to --retention.resolution-1h + in compact. + type: string + retentionResolution5m: + description: How long to retain samples of resolution 1 (5 + minutes) in bucket. It applies to --retention.resolution-5m + in compact. + type: string + retentionResolutionRaw: + description: How long to retain raw samples in a bucket. It + applies to --retention.resolution-raw in compact. + type: string + type: object + rule: + description: spec for thanos-rule + properties: + evalInterval: + description: Evaluation interval + type: string + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + store: + description: spec for thanos-store-shard + properties: + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. 
If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + storeMemcached: + description: Specifies the store memcached + properties: + connectionLimit: + description: Max simultaneous connections of Memcached. + format: int32 + type: integer + maxItemSize: + description: 'Max item size of Memcached (default: 1m, min: + 1k, max: 1024m).' + type: string + memoryLimitMb: + description: Memory limit of Memcached in megabytes. + format: int32 + type: integer + replicas: + description: Replicas for this component. + format: int32 + type: integer + resources: + description: Compute Resources required by this component. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of + compute resources required. If Requests is omitted for + a container, it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + type: object + enableDownsampling: + default: true + description: Enable or disable the downsample. + type: boolean + imagePullPolicy: + description: Pull policy of the MultiClusterObservability images + type: string + imagePullSecret: + description: Pull secret of the MultiClusterObservability images + type: string + nodeSelector: + additionalProperties: + type: string + description: Spec of NodeSelector + type: object + observabilityAddonSpec: + description: The ObservabilityAddonSpec defines the global settings + for all managed clusters which have observability add-on enabled. + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push + metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics + to hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + storageConfig: + description: Specifies the storage to be used by Observability + properties: + alertmanagerStorageSize: + default: 1Gi + description: The amount of storage applied to alertmanager stateful + sets, + type: string + compactStorageSize: + default: 100Gi + description: The amount of storage applied to thanos compact stateful + sets, + type: string + metricObjectStorage: + description: Object store config secret for metrics + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. Refer to https://thanos.io/storage.md/#configuration + for a valid content of key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + required: + - key + - name + type: object + receiveStorageSize: + default: 100Gi + description: The amount of storage applied to thanos receive stateful + sets, + type: string + ruleStorageSize: + default: 1Gi + description: The amount of storage applied to thanos rule stateful + sets, + type: string + storageClass: + default: gp2 + description: Specify the storageClass Stateful Sets. This storage + class will also be used for Object Storage if MetricObjectStorage + was configured for the system to create the storage. + type: string + storeStorageSize: + default: 10Gi + description: The amount of storage applied to thanos store stateful + sets, + type: string + required: + - metricObjectStorage + type: object + tolerations: + description: Tolerations causes all components to tolerate any taints. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. 
+ format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + required: + - observabilityAddonSpec + - storageConfig + type: object + status: + description: MultiClusterObservabilityStatus defines the observed state + of MultiClusterObservability + properties: + conditions: + description: Represents the status of each deployment + items: + description: Condition is from metav1.Condition. Cannot use it directly + because the upgrade issue. Have to mark LastTransitionTime and + Status as optional. + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - message + - reason + - type + type: object + type: array + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml new file mode 100644 index 000000000..0edc4ec55 --- /dev/null +++ b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_observabilityaddons.yaml @@ -0,0 +1,121 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: observabilityaddons.observability.open-cluster-management.io +spec: + group: observability.open-cluster-management.io + names: + kind: ObservabilityAddon + listKind: ObservabilityAddonList + plural: observabilityaddons + shortNames: + - oba + singular: observabilityaddon + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ObservabilityAddon is the Schema for the observabilityaddon API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservabilityAddonSpec is the spec of observability addon + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push + metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics to + hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + status: + description: ObservabilityAddonStatus defines the observed state of ObservabilityAddon + properties: + conditions: + items: + description: StatusCondition contains condition information for + an observability addon + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/config/crd/kustomization.yaml b/operators/multiclusterobservability/config/crd/kustomization.yaml new file mode 100644 index 000000000..9ed6a7364 --- /dev/null +++ b/operators/multiclusterobservability/config/crd/kustomization.yaml @@ -0,0 +1,13 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml +- bases/observability.open-cluster-management.io_observabilityaddons.yaml +- bases/core.observatorium.io_observatoria.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# patches here are for enabling webhook with ocp service serving certificate. +- patches/webhook_multiclusterobservabilities_cainjection_patch.yaml +# +kubebuilder:scaffold:kustomizepatch diff --git a/operators/multiclusterobservability/config/crd/patches/webhook_multiclusterobservabilities_cainjection_patch.yaml b/operators/multiclusterobservability/config/crd/patches/webhook_multiclusterobservabilities_cainjection_patch.yaml new file mode 100644 index 000000000..989447eec --- /dev/null +++ b/operators/multiclusterobservability/config/crd/patches/webhook_multiclusterobservabilities_cainjection_patch.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRDw +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + service.beta.openshift.io/inject-cabundle: "true" + name: multiclusterobservabilities.observability.open-cluster-management.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1","v1beta1"] + clientConfig: + service: + name: multicluster-observability-webhook-service + namespace: open-cluster-management + port: 443 + path: /convert + caBundle: XG4= diff --git a/operators/multiclusterobservability/config/default/kustomization.yaml b/operators/multiclusterobservability/config/default/kustomization.yaml new file mode 100644 index 000000000..880824a4b --- /dev/null +++ b/operators/multiclusterobservability/config/default/kustomization.yaml @@ -0,0 +1,9 @@ +# Adds namespace to all resources. 
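+# For a local/dev installation, this default overlay can be rendered and applied
+# with standard kustomize tooling, for example (illustrative):
+#   kustomize build operators/multiclusterobservability/config/default | kubectl apply -f -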
+namespace: open-cluster-management + +bases: +- ../crd +- ../rbac +- ../manager +- ../webhook + diff --git a/operators/multiclusterobservability/config/manager/kustomization.yaml b/operators/multiclusterobservability/config/manager/kustomization.yaml new file mode 100644 index 000000000..34dc10a84 --- /dev/null +++ b/operators/multiclusterobservability/config/manager/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- manager.yaml + +patchesStrategicMerge: +- manager_webhook_patch.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: quay.io/stolostron/multicluster-observability-operator + newName: quay.io/stolostron/multicluster-observability-operator + newTag: latest diff --git a/operators/multiclusterobservability/config/manager/manager.yaml b/operators/multiclusterobservability/config/manager/manager.yaml new file mode 100644 index 000000000..17669a6cf --- /dev/null +++ b/operators/multiclusterobservability/config/manager/manager.yaml @@ -0,0 +1,83 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + name: multicluster-observability-operator + name: open-cluster-management +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: multicluster-observability-operator + namespace: open-cluster-management + labels: + name: multicluster-observability-operator +spec: + replicas: 1 + selector: + matchLabels: + name: multicluster-observability-operator + template: + metadata: + labels: + name: multicluster-observability-operator + spec: + serviceAccountName: multicluster-observability-operator + containers: + - name: multicluster-observability-operator + command: + - mco-operator + args: + - -leader-elect + # Replace this with the built image name + image: quay.io/stolostron/multicluster-observability-operator:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: false + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + - containerPort: 8383 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "/usr/local/bin/prestop.sh"] + resources: + limits: + cpu: 600m + memory: 1024Mi + requests: + cpu: 100m + memory: 128Mi + env: + - name: WATCH_NAMESPACE + value: "" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_NAME + value: "multicluster-observability-operator" + - name: TEMPLATES_PATH + value: /usr/local/manifests + - name: SPOKE_NAMESPACE + value: open-cluster-management-addon-observability diff --git a/operators/multiclusterobservability/config/manager/manager_webhook_patch.yaml b/operators/multiclusterobservability/config/manager/manager_webhook_patch.yaml new file mode 100644 index 000000000..421f7407a --- /dev/null +++ b/operators/multiclusterobservability/config/manager/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: multicluster-observability-operator + namespace: open-cluster-management +spec: + template: + spec: + containers: + - name: multicluster-observability-operator + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - 
name: cert + secret: + defaultMode: 420 + secretName: multicluster-observability-operator-webhook-server-cert diff --git a/operators/multiclusterobservability/config/manifests/bases/multicluster-observability-operator.clusterserviceversion.yaml b/operators/multiclusterobservability/config/manifests/bases/multicluster-observability-operator.clusterserviceversion.yaml new file mode 100644 index 000000000..0ed34592c --- /dev/null +++ b/operators/multiclusterobservability/config/manifests/bases/multicluster-observability-operator.clusterserviceversion.yaml @@ -0,0 +1,58 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: '[]' + capabilities: Basic Install + name: multicluster-observability-operator.v0.0.0 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: MultiClusterObservability defines the configuration for the Observability installation on Hub and Managed Clusters all through this one custom resource. + displayName: MultiClusterObservability + kind: MultiClusterObservability + name: multiclusterobservabilities.observability.open-cluster-management.io + version: v1beta1 + - description: MultiClusterObservability defines the configuration for the Observability installation on Hub and Managed Clusters all through this one custom resource. + displayName: MultiClusterObservability + kind: MultiClusterObservability + name: multiclusterobservabilities.observability.open-cluster-management.io + version: v1beta2 + - description: ObservabilityAddon is the Schema for the observabilityaddon API + displayName: ObservabilityAddon + kind: ObservabilityAddon + name: observabilityaddons.observability.open-cluster-management.io + version: v1beta1 + description: The multicluster-observability-operator is a component of ACM observability feature. It is designed to install into Hub Cluster. 
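+ # Illustrative usage sketch (not rendered by OLM): once the operator is running
+ # on the hub, observability is enabled by creating an object-store secret and a
+ # MultiClusterObservability CR, mirroring the samples shipped in this patch:
+ #   apiVersion: observability.open-cluster-management.io/v1beta2
+ #   kind: MultiClusterObservability
+ #   metadata:
+ #     name: observability
+ #   spec:
+ #     observabilityAddonSpec: {}
+ #     storageConfig:
+ #       metricObjectStorage:
+ #         name: thanos-object-storage
+ #         key: thanos.yaml
+ # The referenced secret key holds a Thanos object-store config; an S3-style
+ # sketch (field names per https://thanos.io/storage.md/#configuration, values
+ # are placeholders) would be roughly:
+ #   type: s3
+ #   config:
+ #     bucket: <bucket>
+ #     endpoint: <endpoint>
+ #     access_key: <access key>
+ #     secret_key: <secret key>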
+ displayName: Multicluster Observability Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: null + strategy: "" + installModes: + - supported: false + type: OwnNamespace + - supported: false + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - MultiClusterObservability + links: + - name: Multicluster Observability Operator + url: https://multicluster-observability-operator.domain + maintainers: + - email: acm-contact@redhat.com + name: acm-contact + maturity: alpha + provider: + name: Red Hat, Inc + url: https://github.com/stolostron/multicluster-observability-operator + version: 0.0.0 diff --git a/operators/multiclusterobservability/config/manifests/kustomization.yaml b/operators/multiclusterobservability/config/manifests/kustomization.yaml new file mode 100644 index 000000000..ec2c16556 --- /dev/null +++ b/operators/multiclusterobservability/config/manifests/kustomization.yaml @@ -0,0 +1,3 @@ +resources: +- ../default +- ../samples diff --git a/operators/multiclusterobservability/config/rbac/kustomization.yaml b/operators/multiclusterobservability/config/rbac/kustomization.yaml new file mode 100644 index 000000000..f5842e59f --- /dev/null +++ b/operators/multiclusterobservability/config/rbac/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +# - role.yaml +# - role_binding.yaml +- mco_role_binding.yaml +- mco_role.yaml +- mco_service_account.yaml + diff --git a/operators/multiclusterobservability/config/rbac/mco_role.yaml b/operators/multiclusterobservability/config/rbac/mco_role.yaml new file mode 100644 index 000000000..f22ce4bca --- /dev/null +++ b/operators/multiclusterobservability/config/rbac/mco_role.yaml @@ -0,0 +1,337 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: multicluster-observability-operator +rules: +- apiGroups: + - "" + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - serviceaccounts + - namespaces + - nodes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + verbs: + - watch + - get + - list +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resourceNames: + - multicluster-observability-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get +- apiGroups: + - monitor.open-cluster-management.io + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - observability.open-cluster-management.io + resources: + - '*' + - multiclusterobservabilities + - endpointmonitorings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - core.observatorium.io + resources: + - observatoria + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - integreatly.org + resources: + - grafanas + - grafanas/status + - 
grafanas/finalizers + - grafanadashboards + - grafanadashboards/status + - grafanadatasources + - grafanadatasources/status + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + - routes/custom-host + - routes/status + verbs: + - get + - list + - create + - update + - delete + - deletecollection + - watch + - create +- apiGroups: + - cluster.open-cluster-management.io + resources: + - manageclusters + verbs: + - 'get' + - 'list' + - 'watch' +- apiGroups: + - work.open-cluster-management.io + resources: + - manifestworks + verbs: + - '*' +- apiGroups: + - config.openshift.io + resources: + - '*' + - infrastructures + verbs: + - '*' +- apiGroups: + - operator.openshift.io + resources: + - ingresscontrollers + verbs: + - 'get' + - 'list' + - 'watch' +- apiGroups: + - certmanager.k8s.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - delete + - get + - list + - watch + - create + - update + - patch +- apiGroups: + - addon.open-cluster-management.io + resources: + - clustermanagementaddons + verbs: + - create + - update + - get + - delete + - list + - watch +- apiGroups: + - addon.open-cluster-management.io + resources: + - managedclusteraddons + - managedclusteraddons/status + verbs: + - watch + - create + - update + - delete + - get + - list +- apiGroups: + - migration.k8s.io + resources: + - storageversionmigrations + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - cluster.open-cluster-management.io + verbs: + - watch + - get + - list + resources: + - managedclusters +- apiGroups: + - operator.open-cluster-management.io + verbs: + - watch + - get + - list + resources: + - multiclusterhubs +- apiGroups: + - certificates.k8s.io + verbs: + - get + - list + - watch + resources: + - certificatesigningrequests +- apiGroups: + - certificates.k8s.io + verbs: + - update + resources: + - certificatesigningrequests/status + - certificatesigningrequests/approval +- apiGroups: + - certificates.k8s.io + verbs: + - approve + resources: + - signers + resourceNames: + - kubernetes.io/kube-apiserver-client + - open-cluster-management.io/observability-signer +- apiGroups: + - certificates.k8s.io + verbs: + - get + - list + - watch + resources: + - certificatesigningrequests +- apiGroups: + - certificates.k8s.io + verbs: + - update + resources: + - certificatesigningrequests/status +- apiGroups: + - certificates.k8s.io + verbs: + - sign + resources: + - signers + resourceNames: + - open-cluster-management.io/observability-signer +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - imageregistry.open-cluster-management.io + resources: + - managedclusterimageregistries + verbs: + - get + - list + - watch diff --git a/operators/multiclusterobservability/config/rbac/mco_role_binding.yaml b/operators/multiclusterobservability/config/rbac/mco_role_binding.yaml new file mode 100644 index 000000000..55b7fa056 --- /dev/null +++ b/operators/multiclusterobservability/config/rbac/mco_role_binding.yaml @@ 
-0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: multicluster-observability-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multicluster-observability-operator +subjects: +- kind: ServiceAccount + name: multicluster-observability-operator + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/config/rbac/mco_service_account.yaml b/operators/multiclusterobservability/config/rbac/mco_service_account.yaml new file mode 100644 index 000000000..ebcae7850 --- /dev/null +++ b/operators/multiclusterobservability/config/rbac/mco_service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multicluster-observability-operator + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/config/rbac/role.yaml b/operators/multiclusterobservability/config/rbac/role.yaml new file mode 100644 index 000000000..8f94993a9 --- /dev/null +++ b/operators/multiclusterobservability/config/rbac/role.yaml @@ -0,0 +1,60 @@ + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - observability.open-cluster-management.io + resources: + - multiclusterobservabilities + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - observability.open-cluster-management.io + resources: + - multiclusterobservabilities/finalizers + verbs: + - update +- apiGroups: + - observability.open-cluster-management.io + resources: + - multiclusterobservabilities/status + verbs: + - get + - patch + - update +- apiGroups: + - observability.open-cluster-management.io + resources: + - placementrules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - observability.open-cluster-management.io + resources: + - placementrules/finalizers + verbs: + - update +- apiGroups: + - observability.open-cluster-management.io + resources: + - placementrules/status + verbs: + - get + - patch + - update diff --git a/operators/multiclusterobservability/config/samples/kustomization.yaml b/operators/multiclusterobservability/config/samples/kustomization.yaml new file mode 100644 index 000000000..f79069338 --- /dev/null +++ b/operators/multiclusterobservability/config/samples/kustomization.yaml @@ -0,0 +1,6 @@ +## Append samples you want in your CSV to this file as resources ## +resources: +- observability_v1beta1_multiclusterobservability.yaml +- observability_v1beta2_multiclusterobservability.yaml +- observability_v1beta1_observabilityaddon.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/operators/multiclusterobservability/config/samples/observability_v1beta1_multiclusterobservability.yaml b/operators/multiclusterobservability/config/samples/observability_v1beta1_multiclusterobservability.yaml new file mode 100644 index 000000000..6e9b27557 --- /dev/null +++ b/operators/multiclusterobservability/config/samples/observability_v1beta1_multiclusterobservability.yaml @@ -0,0 +1,10 @@ +apiVersion: observability.open-cluster-management.io/v1beta1 +kind: MultiClusterObservability +metadata: + name: observability +spec: + observabilityAddonSpec: {} + storageConfigObject: + metricObjectStorage: + name: thanos-object-storage + key: thanos.yaml diff --git a/operators/multiclusterobservability/config/samples/observability_v1beta1_observabilityaddon.yaml 
b/operators/multiclusterobservability/config/samples/observability_v1beta1_observabilityaddon.yaml new file mode 100644 index 000000000..ad5104fec --- /dev/null +++ b/operators/multiclusterobservability/config/samples/observability_v1beta1_observabilityaddon.yaml @@ -0,0 +1,7 @@ +apiVersion: observability.open-cluster-management.io/v1beta1 +kind: ObservabilityAddon +metadata: + name: observability-addon +spec: + enableMetrics: true + interval: 30 diff --git a/operators/multiclusterobservability/config/samples/observability_v1beta2_multiclusterobservability.yaml b/operators/multiclusterobservability/config/samples/observability_v1beta2_multiclusterobservability.yaml new file mode 100644 index 000000000..ec65ee6d6 --- /dev/null +++ b/operators/multiclusterobservability/config/samples/observability_v1beta2_multiclusterobservability.yaml @@ -0,0 +1,10 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability +spec: + observabilityAddonSpec: {} + storageConfig: + metricObjectStorage: + name: thanos-object-storage + key: thanos.yaml diff --git a/operators/multiclusterobservability/config/webhook/kustomization.yaml b/operators/multiclusterobservability/config/webhook/kustomization.yaml new file mode 100644 index 000000000..fa597c4a9 --- /dev/null +++ b/operators/multiclusterobservability/config/webhook/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- service.yaml +- validatingwebhookconfiguration.yaml + +patchesStrategicMerge: +- webhook_service_cert_patch.yaml +- webhook_cainjection_patch.yaml diff --git a/operators/multiclusterobservability/config/webhook/manifests.yaml b/operators/multiclusterobservability/config/webhook/manifests.yaml new file mode 100644 index 000000000..2fe367265 --- /dev/null +++ b/operators/multiclusterobservability/config/webhook/manifests.yaml @@ -0,0 +1,29 @@ + +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability + failurePolicy: Fail + name: vmulticlusterobservability.observability.open-cluster-management.io + rules: + - apiGroups: + - observability.open-cluster-management.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - multiclusterobservabilities + sideEffects: None diff --git a/operators/multiclusterobservability/config/webhook/service.yaml b/operators/multiclusterobservability/config/webhook/service.yaml new file mode 100644 index 000000000..aeb02731c --- /dev/null +++ b/operators/multiclusterobservability/config/webhook/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: multicluster-observability-webhook-service + namespace: open-cluster-management + labels: + name: multicluster-observability-operator +spec: + ports: + - port: 443 + protocol: TCP + targetPort: 9443 + selector: + name: multicluster-observability-operator diff --git a/operators/multiclusterobservability/config/webhook/validatingwebhookconfiguration.yaml b/operators/multiclusterobservability/config/webhook/validatingwebhookconfiguration.yaml new file mode 100644 index 000000000..6d7e57692 --- /dev/null +++ b/operators/multiclusterobservability/config/webhook/validatingwebhookconfiguration.yaml @@ -0,0 +1,29 @@ +apiVersion: 
admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: multicluster-observability-operator + creationTimestamp: null +webhooks: +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: multicluster-observability-webhook-service + namespace: open-cluster-management + port: 443 + path: /validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability + caBundle: XG4= + failurePolicy: Fail + name: vmulticlusterobservability.observability.open-cluster-management.io + rules: + - apiGroups: + - observability.open-cluster-management.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - multiclusterobservabilities + sideEffects: None diff --git a/operators/multiclusterobservability/config/webhook/webhook_cainjection_patch.yaml b/operators/multiclusterobservability/config/webhook/webhook_cainjection_patch.yaml new file mode 100644 index 000000000..96424469f --- /dev/null +++ b/operators/multiclusterobservability/config/webhook/webhook_cainjection_patch.yaml @@ -0,0 +1,7 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: multicluster-observability-operator + annotations: + service.beta.openshift.io/inject-cabundle: "true" + diff --git a/operators/multiclusterobservability/config/webhook/webhook_service_cert_patch.yaml b/operators/multiclusterobservability/config/webhook/webhook_service_cert_patch.yaml new file mode 100644 index 000000000..7c6ab6a21 --- /dev/null +++ b/operators/multiclusterobservability/config/webhook/webhook_service_cert_patch.yaml @@ -0,0 +1,8 @@ +# The following patch enables the ocp service serving certificate for webhook service. +apiVersion: v1 +kind: Service +metadata: + name: multicluster-observability-webhook-service + namespace: open-cluster-management + annotations: + service.beta.openshift.io/serving-cert-secret-name: multicluster-observability-operator-webhook-server-cert diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go b/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go new file mode 100644 index 000000000..cd8708f45 --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go @@ -0,0 +1,180 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "bytes" + "context" + "fmt" + "time" + + "gopkg.in/yaml.v2" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +const ( + defaultReplicas int32 = 1 + restartLabel = "datasource/time-restarted" + datasourceKey = "datasources.yaml" +) + +type GrafanaDatasources struct { + APIVersion int `yaml:"apiVersion,omitempty"` + Datasources []*GrafanaDatasource `yaml:"datasources,omitempty"` +} + +type GrafanaDatasource struct { + Access string `yaml:"access,omitempty"` + BasicAuth bool `yaml:"basicAuth,omitempty"` + BasicAuthPassword string `yaml:"basicAuthPassword,omitempty"` + BasicAuthUser string `yaml:"basicAuthUser,omitempty"` + Editable bool `yaml:"editable,omitempty"` + IsDefault bool `yaml:"isDefault,omitempty"` + Name string `yaml:"name,omitempty"` + OrgID int `yaml:"orgId,omitempty"` + Type string `yaml:"type,omitempty"` + URL string `yaml:"url,omitempty"` + Version int `yaml:"version,omitempty"` + JSONData *JsonData `yaml:"jsonData,omitempty"` + SecureJSONData *SecureJsonData `yaml:"secureJsonData,omitempty"` +} + +type JsonData struct { + TLSAuth bool `yaml:"tlsAuth,omitempty"` + TLSAuthCA bool `yaml:"tlsAuthWithCACert,omitempty"` + QueryTimeout string `yaml:"queryTimeout,omitempty"` + HttpMethod string `yaml:"httpMethod,omitempty"` + TimeInterval string `yaml:"timeInterval,omitempty"` +} + +type SecureJsonData struct { + TLSCACert string `yaml:"tlsCACert,omitempty"` + TLSClientCert string `yaml:"tlsClientCert,omitempty"` + TLSClientKey string `yaml:"tlsClientKey,omitempty"` +} + +// GenerateGrafanaDataSource is used to generate the GrafanaDatasource as a secret. 
+// the GrafanaDatasource points to observatorium api gateway service +func GenerateGrafanaDataSource( + c client.Client, + scheme *runtime.Scheme, + mco *mcov1beta2.MultiClusterObservability) (*ctrl.Result, error) { + + grafanaDatasources, err := yaml.Marshal(GrafanaDatasources{ + APIVersion: 1, + Datasources: []*GrafanaDatasource{ + { + Name: "Observatorium", + Type: "prometheus", + Access: "proxy", + IsDefault: true, + URL: fmt.Sprintf("http://%s.%s.svc.cluster.local:8080", config.ProxyServiceName, config.GetDefaultNamespace()), + JSONData: &JsonData{ + QueryTimeout: "300s", + TimeInterval: fmt.Sprintf("%ds", mco.Spec.ObservabilityAddonSpec.Interval), + }, + }, + }, + }) + if err != nil { + return &ctrl.Result{}, err + } + + dsSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "grafana-datasources", + Namespace: config.GetDefaultNamespace(), + }, + Type: "Opaque", + Data: map[string][]byte{ + datasourceKey: grafanaDatasources, + }, + } + + // Set MultiClusterObservability instance as the owner and controller + if err = controllerutil.SetControllerReference(mco, dsSecret, scheme); err != nil { + return &ctrl.Result{}, err + } + + // Check if this already exists + grafanaDSFound := &corev1.Secret{} + err = c.Get( + context.TODO(), + types.NamespacedName{ + Name: dsSecret.Name, + Namespace: dsSecret.Namespace, + }, + grafanaDSFound, + ) + + if err != nil && errors.IsNotFound(err) { + log.Info("Creating a new grafana datasource secret", + "dsSecret.Namespace", dsSecret.Namespace, + "dsSecret.Name", dsSecret.Name, + ) + + err = c.Create(context.TODO(), dsSecret) + if err != nil { + return &ctrl.Result{}, err + } + + // Pod created successfully - don't requeue + return nil, nil + } else if err != nil { + return &ctrl.Result{}, err + } + if (grafanaDSFound.Data[datasourceKey] != nil && + !bytes.Equal(grafanaDSFound.Data[datasourceKey], dsSecret.Data[datasourceKey])) || + grafanaDSFound.Data[datasourceKey] == nil { + log.Info("Updating grafana datasource secret") + err = c.Update(context.TODO(), dsSecret) + if err != nil { + log.Error(err, "Failed to update grafana datasource secret") + return &ctrl.Result{}, err + } + err = updateDeployLabel(c) + if err != nil { + return &ctrl.Result{}, err + } + } + + return nil, nil +} + +func updateDeployLabel(c client.Client) error { + name := config.GetOperandName(config.Grafana) + dep := &appv1.Deployment{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: config.GetDefaultNamespace(), + }, dep) + if err != nil { + if !errors.IsNotFound(err) { + log.Error(err, "Failed to check the deployment", "name", name) + } + return err + } + if dep.Status.ReadyReplicas != 0 { + dep.Spec.Template.ObjectMeta.Labels[restartLabel] = time.Now().Format("2006-1-2.1504") + err = c.Update(context.TODO(), dep) + if err != nil { + log.Error(err, "Failed to update the deployment", "name", name) + return err + } else { + log.Info("Update deployment datasource/restart label", "name", name) + } + } + return nil +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/grafana_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/grafana_test.go new file mode 100644 index 000000000..14b822a0c --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/grafana_test.go @@ -0,0 +1,24 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "testing" +) + +func TestUpdateGrafanaSpec(t *testing.T) { + // mco := &mcov1beta2.MultiClusterObservability{ + // Spec: mcov1beta2.MultiClusterObservabilitySpec{ + // Grafana: &mcov1beta2.GrafanaSpec{ + // Hostport: defaultHostport, + // }, + // }, + // } + + // updateGrafanaConfig(mco) + + // if mco.Spec.Grafana.Replicas != 1 { + // t.Errorf("Replicas (%v) is not the expected (%v)", mco.Spec.Grafana.Replicas, defaultReplicas) + // } +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go new file mode 100644 index 000000000..3b4c1de82 --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go @@ -0,0 +1,799 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "context" + "fmt" + "os" + "reflect" + "time" + + "github.com/go-logr/logr" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + storev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + placementctrl "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/controllers/placementrule" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/certificates" + certctrl "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/certificates" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering" + smctrl "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/servicemonitor" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/deploying" + commonutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" + mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + observatoriumv1alpha1 
"github.com/stolostron/observatorium-operator/api/v1alpha1" +) + +const ( + resFinalizer = "observability.open-cluster-management.io/res-cleanup" + // deprecated one + certFinalizer = "observability.open-cluster-management.io/cert-cleanup" +) + +var ( + log = logf.Log.WithName("controller_multiclustermonitoring") + enableHubRemoteWrite = os.Getenv("ENABLE_HUB_REMOTEWRITE") + isAlertmanagerStorageSizeChanged = false + isCompactStorageSizeChanged = false + isRuleStorageSizeChanged = false + isReceiveStorageSizeChanged = false + isStoreStorageSizeChanged = false +) + +// MultiClusterObservabilityReconciler reconciles a MultiClusterObservability object +type MultiClusterObservabilityReconciler struct { + Manager manager.Manager + Client client.Client + Log logr.Logger + Scheme *runtime.Scheme + CRDMap map[string]bool + APIReader client.Reader + RESTMapper meta.RESTMapper +} + +// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=multiclusterobservabilities,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=multiclusterobservabilities/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=multiclusterobservabilities/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// Modify the Reconcile function to compare the state specified by +// the MultiClusterObservability object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile +func (r *MultiClusterObservabilityReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqLogger := log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) + reqLogger.Info("Reconciling MultiClusterObservability") + + // Fetch the MultiClusterObservability instance + instance := &mcov1beta2.MultiClusterObservability{} + err := r.Client.Get(context.TODO(), types.NamespacedName{ + Name: config.GetMonitoringCRName(), + }, instance) + if err != nil { + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return ctrl.Result{}, err + } + + // start to update mco status + StartStatusUpdate(r.Client, instance) + + ingressCtlCrdExists, _ := r.CRDMap[config.IngressControllerCRD] + if os.Getenv("UNIT_TEST") != "true" { + // start placement controller + err := placementctrl.StartPlacementController(r.Manager, r.CRDMap) + if err != nil { + return ctrl.Result{}, err + } + // setup ocm addon manager + certctrl.Start(r.Client, ingressCtlCrdExists) + + // start servicemonitor controller + smctrl.Start() + } + + // Init finalizers + isTerminating, err := r.initFinalization(instance) + if err != nil { + return ctrl.Result{}, err + } else if isTerminating { + reqLogger.Info("MCO instance is in Terminating status, skip the reconcile") + return ctrl.Result{}, err + } + + // check if the MCH CRD exists + mchCrdExists, _ := r.CRDMap[config.MCHCrdName] + // requeue after 10 seconds if the mch crd exists and image image manifests map is empty + if mchCrdExists && len(config.GetImageManifests()) == 0 { + // if the mch CR is not ready, then requeue the request after 10s + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + // Do not reconcile objects if this instance of mch is labeled "paused" + if config.IsPaused(instance.GetAnnotations()) { + reqLogger.Info("MCO reconciliation is paused. Nothing more to do.") + return ctrl.Result{}, nil + } + + storageClassSelected, err := getStorageClass(instance, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // handle storagesize changes + result, err := r.HandleStorageSizeChange(instance) + if result != nil { + return *result, err + } + + //set operand names to cover the upgrade case since we have name changed in new release + err = config.SetOperandNames(r.Client) + if err != nil { + return *result, err + } + //instance.Namespace = config.GetDefaultNamespace() + instance.Spec.StorageConfig.StorageClass = storageClassSelected + //Render the templates with a specified CR + renderer := rendering.NewMCORenderer(instance) + toDeploy, err := renderer.Render() + if err != nil { + reqLogger.Error(err, "Failed to render multiClusterMonitoring templates") + return ctrl.Result{}, err + } + deployer := deploying.NewDeployer(r.Client) + //Deploy the resources + ns := &corev1.Namespace{} + for _, res := range toDeploy { + resNS := res.GetNamespace() + if resNS == config.GetDefaultNamespace() { + if err := controllerutil.SetControllerReference(instance, res, r.Scheme); err != nil { + reqLogger.Error(err, "Failed to set controller reference") + } + } + if resNS == "" { + resNS = config.GetDefaultNamespace() + } + if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: resNS}, ns); err != nil && apierrors.IsNotFound(err) { + ns = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: resNS, + }} + if err := r.Client.Create(context.TODO(), ns); err != nil { + reqLogger.Error(err, fmt.Sprintf("Failed to create namespace %s", resNS)) + return ctrl.Result{}, err + } + } + if err := deployer.Deploy(res); err != nil { + reqLogger.Error(err, fmt.Sprintf("Failed to deploy %s %s/%s", + res.GetKind(), config.GetDefaultNamespace(), res.GetName())) + return ctrl.Result{}, err + } + } + + // the route resource won't be created in testing env, for instance, KinD + // in the testing env, the service can be accessed via service name, we assume that + // in testing env, the local-cluster is the only allowed managedcluster + if ingressCtlCrdExists { + // expose alertmanager through route + result, err = GenerateAlertmanagerRoute(r.Client, r.Scheme, instance) + if 
result != nil { + return *result, err + } + + // expose observatorium api gateway + result, err = GenerateAPIGatewayRoute(r.Client, r.Scheme, instance) + if result != nil { + return *result, err + } + + // expose rbac proxy through route + result, err = GenerateProxyRoute(r.Client, r.Scheme, instance) + if result != nil { + return *result, err + } + } + + // create the certificates + err = certificates.CreateObservabilityCerts(r.Client, r.Scheme, instance, ingressCtlCrdExists) + if err != nil { + return ctrl.Result{}, err + } + + // create an Observatorium CR + result, err = GenerateObservatoriumCR(r.Client, r.Scheme, instance) + if result != nil { + return *result, err + } + + // generate grafana datasource to point to observatorium api gateway + result, err = GenerateGrafanaDataSource(r.Client, r.Scheme, instance) + if result != nil { + return *result, err + } + + svmCrdExists, _ := r.CRDMap[config.StorageVersionMigrationCrdName] + if svmCrdExists { + // create or update the storage version migration resource + err = createOrUpdateObservabilityStorageVersionMigrationResource(r.Client, r.Scheme, instance) + if err != nil { + return ctrl.Result{}, err + } + } + + //update status + requeueStatusUpdate <- struct{}{} + + return ctrl.Result{}, nil +} + +// labelsForMultiClusterMonitoring returns the labels for selecting the resources +// belonging to the given MultiClusterObservability CR name. +func labelsForMultiClusterMonitoring(name string) map[string]string { + return map[string]string{"observability.open-cluster-management.io/name": name} +} + +func (r *MultiClusterObservabilityReconciler) initFinalization( + mco *mcov1beta2.MultiClusterObservability) (bool, error) { + if mco.GetDeletionTimestamp() != nil && commonutil.Contains(mco.GetFinalizers(), resFinalizer) { + log.Info("To delete resources across namespaces") + svmCrdExists := r.CRDMap[config.StorageVersionMigrationCrdName] + if svmCrdExists { + // remove the StorageVersionMigration resource and ignore error + cleanObservabilityStorageVersionMigrationResource(r.Client, mco) // #nosec + } + // clean up the cluster resources, eg. 
clusterrole, clusterrolebinding, etc + if err := cleanUpClusterScopedResources(r.Client, mco); err != nil { + log.Error(err, "Failed to remove cluster scoped resources") + return false, err + } + + // clean up operand names + config.CleanUpOperandNames() + + mco.SetFinalizers(commonutil.Remove(mco.GetFinalizers(), resFinalizer)) + err := r.Client.Update(context.TODO(), mco) + if err != nil { + log.Error(err, "Failed to remove finalizer from mco resource") + return false, err + } + log.Info("Finalizer removed from mco resource") + + // stop update status routine + stopStatusUpdate <- struct{}{} + + return true, nil + } + if !commonutil.Contains(mco.GetFinalizers(), resFinalizer) { + mco.SetFinalizers(commonutil.Remove(mco.GetFinalizers(), certFinalizer)) + mco.SetFinalizers(append(mco.GetFinalizers(), resFinalizer)) + err := r.Client.Update(context.TODO(), mco) + if err != nil { + log.Error(err, "Failed to add finalizer to mco resource") + return false, err + } + log.Info("Finalizer added to mco resource") + } + return false, nil +} + +func getStorageClass(mco *mcov1beta2.MultiClusterObservability, cl client.Client) (string, error) { + storageClassSelected := mco.Spec.StorageConfig.StorageClass + // for the test, the reader is just nil + storageClassList := &storev1.StorageClassList{} + err := cl.List(context.TODO(), storageClassList, &client.ListOptions{}) + if err != nil { + return "", err + } + configuredWithValidSC := false + storageClassDefault := "" + for _, storageClass := range storageClassList.Items { + if storageClass.ObjectMeta.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" { + storageClassDefault = storageClass.ObjectMeta.Name + } + if storageClass.ObjectMeta.Name == storageClassSelected { + configuredWithValidSC = true + } + } + if !configuredWithValidSC { + storageClassSelected = storageClassDefault + } + return storageClassSelected, nil +} + +// SetupWithManager sets up the controller with the Manager. 
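+// It wires the MultiClusterObservability CR as the primary resource, requeues the owner on changes
+// to the owned Deployments, StatefulSets, ConfigMaps, Secrets, Services and Observatorium CR, and
+// adds extra watches (custom rule ConfigMap, Alertmanager route secrets, MultiClusterHub) that are
+// filtered by the predicates defined below.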
+func (r *MultiClusterObservabilityReconciler) SetupWithManager(mgr ctrl.Manager) error { + c := mgr.GetClient() + mcoPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + //set request name to be used in placementrule controller + config.SetMonitoringCRName(e.Object.GetName()) + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + checkStorageChanged(e.ObjectOld.(*mcov1beta2.MultiClusterObservability).Spec.StorageConfig, + e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec.StorageConfig) + return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return !e.DeleteStateUnknown + }, + } + + cmPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetName() == config.AlertRuleCustomConfigMapName && + e.Object.GetNamespace() == config.GetDefaultNamespace() { + config.SetCustomRuleConfigMap(true) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // Find a way to restart the alertmanager to take the update + // if e.ObjectNew.GetName() == config.AlertRuleCustomConfigMapName && + // e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() { + // config.SetCustomRuleConfigMap(true) + // return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() + // } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if e.Object.GetName() == config.AlertRuleCustomConfigMapName && + e.Object.GetNamespace() == config.GetDefaultNamespace() { + config.SetCustomRuleConfigMap(false) + return true + } + return false + }, + } + + secretPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetNamespace() == config.GetDefaultNamespace() && + (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || + e.Object.GetName() == config.AlertmanagerRouteBYOCERTName) { + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() && + (e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCAName || + e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCERTName) { + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if e.Object.GetNamespace() == config.GetDefaultNamespace() && + (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || + e.Object.GetName() == config.AlertmanagerRouteBYOCERTName || + e.Object.GetName() == config.AlertmanagerConfigName) { + return true + } + return false + }, + } + + ctrBuilder := ctrl.NewControllerManagedBy(mgr). + // Watch for changes to primary resource MultiClusterObservability with predicate + For(&mcov1beta2.MultiClusterObservability{}, builder.WithPredicates(mcoPred)). + // Watch for changes to secondary resource Deployment and requeue the owner MultiClusterObservability + Owns(&appsv1.Deployment{}). + // Watch for changes to secondary resource statefulSet and requeue the owner MultiClusterObservability + Owns(&appsv1.StatefulSet{}). + // Watch for changes to secondary resource ConfigMap and requeue the owner c + Owns(&corev1.ConfigMap{}). + // Watch for changes to secondary resource Secret and requeue the owner MultiClusterObservability + Owns(&corev1.Secret{}). + // Watch for changes to secondary resource Service and requeue the owner MultiClusterObservability + Owns(&corev1.Service{}). 
+ // Watch for changes to secondary Observatorium CR and requeue the owner MultiClusterObservability + Owns(&observatoriumv1alpha1.Observatorium{}). + // Watch the configmap for thanos-ruler-custom-rules update + Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(cmPred)). + // Watch the secret for deleting event of alertmanager-config + Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(secretPred)) + + mchGroupKind := schema.GroupKind{Group: mchv1.SchemeGroupVersion.Group, Kind: "MultiClusterHub"} + if _, err := r.RESTMapper.RESTMapping(mchGroupKind, mchv1.SchemeGroupVersion.Version); err == nil { + mchPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // this is for operator restart, the mch CREATE event will be caught and the mch should be ready + if e.Object.GetNamespace() == config.GetMCONamespace() && + e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && + e.Object.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion { + // only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap(c, e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion) + if err != nil { + return false + } + return ok + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetNamespace() == config.GetMCONamespace() && + e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && + e.ObjectNew.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion { + // only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap(c, e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion) + if err != nil { + return false + } + return ok + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } + + mchCrdExists, _ := r.CRDMap[config.MCHCrdName] + if mchCrdExists { + // secondary watch for MCH + ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &mchv1.MultiClusterHub{}}, handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: config.MCHUpdatedRequestName, + Namespace: a.GetNamespace(), + }}, + } + }), builder.WithPredicates(mchPred)) + } + } + + // create and return a new controller + return ctrBuilder.Complete(r) +} + +func checkStorageChanged(mcoOldConfig, mcoNewConfig *mcov1beta2.StorageConfig) { + + if mcoOldConfig.AlertmanagerStorageSize != mcoNewConfig.AlertmanagerStorageSize { + isAlertmanagerStorageSizeChanged = true + } + if mcoOldConfig.CompactStorageSize != mcoNewConfig.CompactStorageSize { + isCompactStorageSizeChanged = true + } + if mcoOldConfig.RuleStorageSize != mcoNewConfig.RuleStorageSize { + isRuleStorageSizeChanged = true + } + if mcoOldConfig.ReceiveStorageSize != mcoNewConfig.ReceiveStorageSize { + isReceiveStorageSizeChanged = true + } + if mcoOldConfig.StoreStorageSize != mcoNewConfig.StoreStorageSize { + isStoreStorageSizeChanged = true + } +} + +// HandleStorageSizeChange is used to deal with the storagesize change in CR +// 1. Directly changed the StatefulSet pvc's size on the pvc itself for +// 2. 
Removed StatefulSet and +// wait for operator to re-create the StatefulSet with the correct size on the claim +func (r *MultiClusterObservabilityReconciler) HandleStorageSizeChange( + mco *mcov1beta2.MultiClusterObservability) (*reconcile.Result, error) { + + if isAlertmanagerStorageSizeChanged { + isAlertmanagerStorageSizeChanged = false + err := updateStorageSizeChange(r.Client, + map[string]string{ + "observability.open-cluster-management.io/name": mco.GetName(), + "alertmanager": "observability", + }, mco.Spec.StorageConfig.AlertmanagerStorageSize) + if err != nil { + return &reconcile.Result{}, err + } + } + + if isReceiveStorageSizeChanged { + isReceiveStorageSizeChanged = false + err := updateStorageSizeChange(r.Client, + map[string]string{ + "app.kubernetes.io/instance": mco.GetName(), + "app.kubernetes.io/name": "thanos-receive", + }, mco.Spec.StorageConfig.ReceiveStorageSize) + if err != nil { + return &reconcile.Result{}, err + } + } + + if isCompactStorageSizeChanged { + isCompactStorageSizeChanged = false + err := updateStorageSizeChange(r.Client, + map[string]string{ + "app.kubernetes.io/instance": mco.GetName(), + "app.kubernetes.io/name": "thanos-compact", + }, mco.Spec.StorageConfig.CompactStorageSize) + if err != nil { + return &reconcile.Result{}, err + } + } + + if isRuleStorageSizeChanged { + isRuleStorageSizeChanged = false + err := updateStorageSizeChange(r.Client, + map[string]string{ + "app.kubernetes.io/instance": mco.GetName(), + "app.kubernetes.io/name": "thanos-rule", + }, mco.Spec.StorageConfig.RuleStorageSize) + if err != nil { + return &reconcile.Result{}, err + } + } + + if isStoreStorageSizeChanged { + isStoreStorageSizeChanged = false + err := updateStorageSizeChange(r.Client, + map[string]string{ + "app.kubernetes.io/instance": mco.GetName(), + "app.kubernetes.io/name": "thanos-store", + }, mco.Spec.StorageConfig.StoreStorageSize) + if err != nil { + return &reconcile.Result{}, err + } + } + return nil, nil +} + +func updateStorageSizeChange(c client.Client, matchLabels map[string]string, storageSize string) error { + + pvcList := []corev1.PersistentVolumeClaim{} + stsList := []appsv1.StatefulSet{} + + pvcList, err := util.GetPVCList(c, matchLabels) + if err != nil { + return err + } + + stsList, err = util.GetStatefulSetList(c, matchLabels) + if err != nil { + return err + } + + // update pvc directly + for index, pvc := range pvcList { + if !pvc.Spec.Resources.Requests.Storage().Equal(resource.MustParse(storageSize)) { + pvcList[index].Spec.Resources.Requests = corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(storageSize), + } + err := c.Update(context.TODO(), &pvcList[index]) + if err != nil { + return err + } + log.Info("Update storage size for PVC", "pvc", pvc.Name) + } + } + // update sts + for index, sts := range stsList { + err := c.Delete(context.TODO(), &stsList[index], &client.DeleteOptions{}) + if err != nil && !errors.IsNotFound(err) { + return err + } + log.Info("Successfully delete sts due to storage size changed", "sts", sts.Name) + } + return nil +} + +// GenerateAlertmanagerRoute create route for Alertmanager endpoint +func GenerateAlertmanagerRoute( + runclient client.Client, scheme *runtime.Scheme, + mco *mcov1beta2.MultiClusterObservability) (*ctrl.Result, error) { + amGateway := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerRouteName, + Namespace: config.GetDefaultNamespace(), + }, + Spec: routev1.RouteSpec{ + Path: "/api/v2", + Port: &routev1.RoutePort{ + 
TargetPort: intstr.FromString("oauth-proxy"), + }, + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: config.AlertmanagerServiceName, + }, + TLS: &routev1.TLSConfig{ + Termination: routev1.TLSTerminationReencrypt, + InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, + }, + }, + } + + amRouteBYOCaSrt := &corev1.Secret{} + amRouteBYOCertSrt := &corev1.Secret{} + err1 := runclient.Get(context.TODO(), types.NamespacedName{Name: config.AlertmanagerRouteBYOCAName, Namespace: config.GetDefaultNamespace()}, amRouteBYOCaSrt) + err2 := runclient.Get(context.TODO(), types.NamespacedName{Name: config.AlertmanagerRouteBYOCERTName, Namespace: config.GetDefaultNamespace()}, amRouteBYOCertSrt) + + if err1 == nil && err2 == nil { + log.Info("BYO CA/Certificate found for the Route of Alertmanager, will using BYO CA/certificate for the Route of Alertmanager") + amRouteCA, ok := amRouteBYOCaSrt.Data["tls.crt"] + if !ok { + return &ctrl.Result{}, fmt.Errorf("Invalid BYO CA for the Route of Alertmanager") + } + amGateway.Spec.TLS.CACertificate = string(amRouteCA) + + amRouteCert, ok := amRouteBYOCertSrt.Data["tls.crt"] + if !ok { + return &ctrl.Result{}, fmt.Errorf("Invalid BYO Certificate for the Route of Alertmanager") + } + amGateway.Spec.TLS.Certificate = string(amRouteCert) + + amRouteCertKey, ok := amRouteBYOCertSrt.Data["tls.key"] + if !ok { + return &ctrl.Result{}, fmt.Errorf("Invalid BYO Certificate Key for the Route of Alertmanager") + } + amGateway.Spec.TLS.Key = string(amRouteCertKey) + } + + // Set MultiClusterObservability instance as the owner and controller + if err := controllerutil.SetControllerReference(mco, amGateway, scheme); err != nil { + return &ctrl.Result{}, err + } + + found := &routev1.Route{} + err := runclient.Get(context.TODO(), types.NamespacedName{Name: amGateway.Name, Namespace: amGateway.Namespace}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating a new route to expose alertmanager", "amGateway.Namespace", amGateway.Namespace, "amGateway.Name", amGateway.Name) + err = runclient.Create(context.TODO(), amGateway) + if err != nil { + return &ctrl.Result{}, err + } + return nil, nil + } + if !reflect.DeepEqual(found.Spec.TLS, amGateway.Spec.TLS) { + log.Info("Found update for the TLS configuration of the Alertmanager Route, try to update the Route") + amGateway.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = runclient.Update(context.TODO(), amGateway) + if err != nil { + return &ctrl.Result{}, err + } + } + return nil, nil +} + +// GenerateProxyRoute create route for Proxy endpoint +func GenerateProxyRoute( + runclient client.Client, scheme *runtime.Scheme, + mco *mcov1beta2.MultiClusterObservability) (*ctrl.Result, error) { + proxyGateway := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.ProxyRouteName, + Namespace: config.GetDefaultNamespace(), + }, + Spec: routev1.RouteSpec{ + Port: &routev1.RoutePort{ + TargetPort: intstr.FromString("https"), + }, + To: routev1.RouteTargetReference{ + Kind: "Service", + Name: config.ProxyServiceName, + }, + TLS: &routev1.TLSConfig{ + Termination: routev1.TLSTerminationReencrypt, + InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, + }, + }, + } + + proxyRouteBYOCaSrt := &corev1.Secret{} + proxyRouteBYOCertSrt := &corev1.Secret{} + err1 := runclient.Get(context.TODO(), types.NamespacedName{Name: config.ProxyRouteBYOCAName, Namespace: config.GetDefaultNamespace()}, proxyRouteBYOCaSrt) + err2 := 
runclient.Get(context.TODO(), types.NamespacedName{Name: config.ProxyRouteBYOCERTName, Namespace: config.GetDefaultNamespace()}, proxyRouteBYOCertSrt) + + if err1 == nil && err2 == nil { + log.Info("BYO CA/Certificate found for the Route of Proxy, will using BYO CA/certificate for the Route of Proxy") + proxyRouteCA, ok := proxyRouteBYOCaSrt.Data["tls.crt"] + if !ok { + return &ctrl.Result{}, fmt.Errorf("Invalid BYO CA for the Route of Proxy") + } + proxyGateway.Spec.TLS.CACertificate = string(proxyRouteCA) + + proxyRouteCert, ok := proxyRouteBYOCertSrt.Data["tls.crt"] + if !ok { + return &ctrl.Result{}, fmt.Errorf("Invalid BYO Certificate for the Route of Proxy") + } + proxyGateway.Spec.TLS.Certificate = string(proxyRouteCert) + + proxyRouteCertKey, ok := proxyRouteBYOCertSrt.Data["tls.key"] + if !ok { + return &ctrl.Result{}, fmt.Errorf("Invalid BYO Certificate Key for the Route of Proxy") + } + proxyGateway.Spec.TLS.Key = string(proxyRouteCertKey) + } + + // Set MultiClusterObservability instance as the owner and controller + if err := controllerutil.SetControllerReference(mco, proxyGateway, scheme); err != nil { + return &ctrl.Result{}, err + } + + found := &routev1.Route{} + err := runclient.Get(context.TODO(), types.NamespacedName{Name: proxyGateway.Name, Namespace: proxyGateway.Namespace}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating a new route to expose rbac proxy", "proxyGateway.Namespace", proxyGateway.Namespace, "proxyGateway.Name", proxyGateway.Name) + err = runclient.Create(context.TODO(), proxyGateway) + if err != nil { + return &ctrl.Result{}, err + } + return nil, nil + } + if !reflect.DeepEqual(found.Spec.TLS, proxyGateway.Spec.TLS) { + log.Info("Found update for the TLS configuration of the Proxy Route, try to update the Route") + proxyGateway.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = runclient.Update(context.TODO(), proxyGateway) + if err != nil { + return &ctrl.Result{}, err + } + } + return nil, nil +} + +// cleanUpClusterScopedResources delete the cluster scoped resources created by the MCO operator +// The cluster scoped resources need to be deleted manually because they don't have ownerrefenence set as the MCO CR +func cleanUpClusterScopedResources(cl client.Client, mco *mcov1beta2.MultiClusterObservability) error { + matchLabels := map[string]string{config.GetCrLabelKey(): mco.Name} + listOpts := []client.ListOption{ + client.MatchingLabels(matchLabels), + } + + clusterRoleList := &rbacv1.ClusterRoleList{} + err := cl.List(context.TODO(), clusterRoleList, listOpts...) + if err != nil { + return err + } + for idx := range clusterRoleList.Items { + err := cl.Delete(context.TODO(), &clusterRoleList.Items[idx], &client.DeleteOptions{}) + if err != nil { + return err + } + } + + clusterRoleBindingList := &rbacv1.ClusterRoleBindingList{} + err = cl.List(context.TODO(), clusterRoleBindingList, listOpts...) 
+ if err != nil { + return err + } + for idx := range clusterRoleBindingList.Items { + err := cl.Delete(context.TODO(), &clusterRoleBindingList.Items[idx], &client.DeleteOptions{}) + if err != nil { + return err + } + } + + return nil +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go new file mode 100644 index 000000000..578721ff2 --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go @@ -0,0 +1,915 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "context" + "os" + "path" + "strings" + "testing" + "time" + + configv1 "github.com/openshift/api/config/v1" + routev1 "github.com/openshift/api/route/v1" + observatoriumv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" + "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + + mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering/templates" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func init() { + os.Setenv("UNIT_TEST", "true") +} + +func newTestCert(name string, namespace string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + "ca.crt": []byte("test-ca-crt"), + "tls.crt": []byte("test-tls-crt"), + "tls.key": []byte("test-tls-key"), + }, + } +} + +var testImagemanifestsMap = map[string]string{ + "endpoint_monitoring_operator": "test.io/endpoint-monitoring:test", + "grafana": "test.io/origin-grafana:test", + "grafana_dashboard_loader": "test.io/grafana-dashboard-loader:test", + "management_ingress": "test.io/management-ingress:test", + "observatorium": "test.io/observatorium:test", + "observatorium_operator": "test.io/observatorium-operator:test", + "prometheus_alertmanager": "test.io/prometheus-alertmanager:test", + "prometheus-config-reloader": "test.io/configmap-reloader:test", + "rbac_query_proxy": "test.io/rbac-query-proxy:test", + "thanos": "test.io/thanos:test", + "thanos_receive_controller": "test.io/thanos_receive_controller:test", +} + +func newTestImageManifestsConfigMap(namespace, version 
string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.ImageManifestConfigMapNamePrefix + version, + Namespace: namespace, + Labels: map[string]string{ + config.OCMManifestConfigMapTypeLabelKey: config.OCMManifestConfigMapTypeLabelValue, + config.OCMManifestConfigMapVersionLabelKey: version, + }, + }, + Data: testImagemanifestsMap, + } +} + +func newMCHInstanceWithVersion(namespace, version string) *mchv1.MultiClusterHub { + return &mchv1.MultiClusterHub{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: namespace, + }, + Spec: mchv1.MultiClusterHubSpec{}, + Status: mchv1.MultiClusterHubStatus{ + CurrentVersion: version, + DesiredVersion: version, + }, + } +} + +func TestLabelsForMultiClusterMonitoring(t *testing.T) { + lab := labelsForMultiClusterMonitoring("test") + + value, _ := lab["observability.open-cluster-management.io/name"] + if value != "test" { + t.Errorf("value (%v) is not the expected (test)", value) + } +} + +func createObservatoriumAPIService(name, namespace string) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name + "-observatorium-api", + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/component": "api", + "app.kubernetes.io/instance": name, + }, + }, + Spec: corev1.ServiceSpec{}, + } +} + +func newClusterManagementAddon() *addonv1alpha1.ClusterManagementAddOn { + return &addonv1alpha1.ClusterManagementAddOn{ + TypeMeta: metav1.TypeMeta{ + APIVersion: addonv1alpha1.SchemeGroupVersion.String(), + Kind: "ClusterManagementAddOn", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ObservabilityController", + }, + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + AddOnMeta: addonv1alpha1.AddOnMeta{ + DisplayName: "ObservabilityController", + Description: "ObservabilityController Description", + }, + AddOnConfiguration: addonv1alpha1.ConfigCoordinates{ + CRDName: "observabilityaddons.observability.open-cluster-management.io", + }, + }, + } +} + +func createReadyStatefulSet(name, namespace, statefulSetName string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: statefulSetName, + Namespace: namespace, + Labels: map[string]string{ + "observability.open-cluster-management.io/name": name, + }, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 1, + Replicas: 1, + }, + } +} + +func createFailedStatefulSet(name, namespace, statefulSetName string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: statefulSetName, + Namespace: namespace, + Labels: map[string]string{ + "observability.open-cluster-management.io/name": name, + }, + }, + Status: appsv1.StatefulSetStatus{ + ReadyReplicas: 0, + }, + } +} + +func createReadyDeployment(name, namespace string) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/component": "api", + "app.kubernetes.io/instance": name, + "observability.open-cluster-management.io/name": name, + }, + }, + Status: appsv1.DeploymentStatus{ + ReadyReplicas: 1, + AvailableReplicas: 1, + Replicas: 1, + }, + } +} + +func 
createFailedDeployment(name, namespace string) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/component": "api", + "app.kubernetes.io/instance": name, + "observability.open-cluster-management.io/name": name, + }, + }, + Status: appsv1.DeploymentStatus{ + ReadyReplicas: 0, + }, + } +} + +func createClusterVersion() *configv1.ClusterVersion { + return &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{Name: "version"}, + Spec: configv1.ClusterVersionSpec{ + ClusterID: configv1.ClusterID("xxx-xxxxxx-xxxx"), + }, + } +} + +func createMultiClusterHubCRD() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: config.MCHCrdName}, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Scope: apiextensionsv1beta1.NamespaceScoped, + Conversion: &apiextensionsv1beta1.CustomResourceConversion{Strategy: apiextensionsv1beta1.NoneConverter}, + Group: "operator.open-cluster-management.io", + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Kind: "MultiClusterHub", + ListKind: "MultiClusterHubList", + Plural: "multiclusterhubs", + ShortNames: []string{"mch"}, + Singular: "multiclusterhub", + }, + Version: "v1", + Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{ + {Name: "v1", Storage: true, Served: true}, + }, + }, + } +} + +func TestMultiClusterMonitoringCRUpdate(t *testing.T) { + var ( + name = "monitoring" + namespace = config.GetDefaultNamespace() + ) + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get work dir: (%v)", err) + } + testManifestsPath := path.Join(wd, "../../tests/manifests") + os.Setenv("TEMPLATES_PATH", testManifestsPath) + + // A MultiClusterObservability object with metadata and spec. + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + config.AnnotationKeyImageTagSuffix: "tag", + }, + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + StorageClass: "gp2", + AlertmanagerStorageSize: "1Gi", + CompactStorageSize: "1Gi", + RuleStorageSize: "1Gi", + ReceiveStorageSize: "1Gi", + StoreStorageSize: "1Gi", + }, + ObservabilityAddonSpec: &mcoshared.ObservabilityAddonSpec{ + EnableMetrics: false, + }, + }, + } + + // Register operator types with the runtime scheme. 
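+	// The fake client created below defaults to this shared scheme, so every custom API type the
+	// reconciler touches is registered here before the client is built.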
+ s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + observatoriumv1alpha1.AddToScheme(s) + routev1.AddToScheme(s) + clusterv1.AddToScheme(s) + addonv1alpha1.AddToScheme(s) + migrationv1alpha1.SchemeBuilder.AddToScheme(s) + + svc := createObservatoriumAPIService(name, namespace) + serverCACerts := newTestCert(config.ServerCACerts, namespace) + clientCACerts := newTestCert(config.ClientCACerts, namespace) + grafanaCert := newTestCert(config.GrafanaCerts, namespace) + serverCert := newTestCert(config.ServerCerts, namespace) + // byo case for the alertmanager route + testAmRouteBYOCaSecret := newTestCert(config.AlertmanagerRouteBYOCAName, namespace) + testAmRouteBYOCertSecret := newTestCert(config.AlertmanagerRouteBYOCERTName, namespace) + clustermgmtAddon := newClusterManagementAddon() + + objs := []runtime.Object{mco, svc, serverCACerts, clientCACerts, grafanaCert, serverCert, + testAmRouteBYOCaSecret, testAmRouteBYOCertSecret, clustermgmtAddon} + // Create a fake client to mock API calls. + cl := fake.NewFakeClient(objs...) + + // Create a ReconcileMemcached object with the scheme and fake client. + r := &MultiClusterObservabilityReconciler{Client: cl, Scheme: s, CRDMap: map[string]bool{config.IngressControllerCRD: true}} + config.SetMonitoringCRName(name) + // Mock request to simulate Reconcile() being called on an event for a + // watched resource . + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: name, + }, + } + + // Create empty client + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO := &mcov1beta2.MultiClusterObservability{} + err = cl.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + + status := findStatusCondition(updatedMCO.Status.Conditions, "Failed") + if status == nil || status.Reason != "ObjectStorageSecretNotFound" { + t.Errorf("Failed to get correct MCO status, expect Failed") + } + + amRoute := &routev1.Route{} + err = cl.Get(context.TODO(), types.NamespacedName{ + Name: config.AlertmanagerRouteName, + Namespace: namespace, + }, amRoute) + if err != nil { + t.Fatalf("Failed to get alertmanager's route: (%v)", err) + } + // check the BYO certificate for alertmanager's route + if amRoute.Spec.TLS.CACertificate != "test-tls-crt" || + amRoute.Spec.TLS.Certificate != "test-tls-crt" || + amRoute.Spec.TLS.Key != "test-tls-key" { + t.Fatalf("incorrect certificate for alertmanager's route") + } + + err = cl.Create(context.TODO(), createSecret("test", "test", namespace)) + if err != nil { + t.Fatalf("Failed to create secret: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO = &mcov1beta2.MultiClusterObservability{} + err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + status = findStatusCondition(updatedMCO.Status.Conditions, "Failed") + if status == nil || status.Reason != "DeploymentNotFound" { + t.Errorf("Failed to get correct MCO status, expect Failed") + } + expectedDeploymentNames := getExpectedDeploymentNames() + for _, deployName := range expectedDeploymentNames { + deploy := createReadyDeployment(deployName, namespace) + err = cl.Create(context.TODO(), deploy) + if err != nil { + 
t.Fatalf("Failed to create deployment %s: %v", deployName, err) + } + } + + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO = &mcov1beta2.MultiClusterObservability{} + err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + status = findStatusCondition(updatedMCO.Status.Conditions, "Failed") + if status == nil || status.Reason != "StatefulSetNotFound" { + t.Errorf("Failed to get correct MCO status, expect Failed") + } + + expectedStatefulSetNames := getExpectedStatefulSetNames() + for _, statefulName := range expectedStatefulSetNames { + deploy := createReadyStatefulSet(name, namespace, statefulName) + err = cl.Create(context.TODO(), deploy) + if err != nil { + t.Fatalf("Failed to create stateful set %s: %v", statefulName, err) + } + } + + result, err := r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + if result.Requeue { + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO = &mcov1beta2.MultiClusterObservability{} + err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + + status = findStatusCondition(updatedMCO.Status.Conditions, "Ready") + if status == nil || status.Reason != "Ready" { + t.Errorf("Failed to get correct MCO status, expect Ready") + } + + status = findStatusCondition(updatedMCO.Status.Conditions, "MetricsDisabled") + if status == nil || status.Reason != "MetricsDisabled" { + t.Errorf("Failed to get correct MCO status, expect MetricsDisabled") + } + + // test MetricsDisabled status + err = cl.Delete(context.TODO(), mco) + if err != nil { + t.Fatalf("Failed to delete mco: (%v)", err) + } + // reconcile to make sure the finalizer of the mco cr is deleted + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + // wait for the stop status update channel is closed + time.Sleep(1 * time.Second) + + mco.Spec.ObservabilityAddonSpec.EnableMetrics = true + mco.ObjectMeta.ResourceVersion = "" + err = cl.Create(context.TODO(), mco) + if err != nil { + t.Fatalf("Failed to create mco: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO = &mcov1beta2.MultiClusterObservability{} + err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + + status = findStatusCondition(updatedMCO.Status.Conditions, "MetricsDisabled") + if status != nil { + t.Errorf("Should have not MetricsDisabled status") + } + + // test StatefulSetNotReady status + err = cl.Delete(context.TODO(), createReadyStatefulSet( + name, + namespace, + config.GetOperandNamePrefix()+"alertmanager")) + if err != nil { + t.Fatalf("Failed to delete alertmanager: (%v)", err) + } + failedAlertManager := createFailedStatefulSet( + name, + namespace, + config.GetOperandNamePrefix()+"alertmanager") + err = cl.Create(context.TODO(), failedAlertManager) + if err != nil { + t.Fatalf("Failed to create alertmanager: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err 
!= nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO = &mcov1beta2.MultiClusterObservability{} + err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + + status = findStatusCondition(updatedMCO.Status.Conditions, "Ready") + if status == nil || status.Reason != "Ready" { + t.Errorf("Failed to get correct MCO status, expect Ready") + } + + // test DeploymentNotReady status + err = cl.Delete(context.TODO(), createReadyDeployment(config.GetOperandNamePrefix()+"rbac-query-proxy", namespace)) + if err != nil { + t.Fatalf("Failed to delete rbac-query-proxy: (%v)", err) + } + err = cl.Delete(context.TODO(), failedAlertManager) + if err != nil { + t.Fatalf("Failed to delete alertmanager: (%v)", err) + } + err = cl.Create(context.TODO(), createReadyStatefulSet( + name, + namespace, + config.GetOperandNamePrefix()+"alertmanager")) + if err != nil { + t.Fatalf("Failed to delete alertmanager: (%v)", err) + } + + failedRbacProxy := createFailedDeployment("rbac-query-proxy", namespace) + err = cl.Create(context.TODO(), failedRbacProxy) + if err != nil { + t.Fatalf("Failed to create rbac-query-proxy: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedMCO = &mcov1beta2.MultiClusterObservability{} + err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) + if err != nil { + t.Fatalf("Failed to get MultiClusterObservability: (%v)", err) + } + + status = findStatusCondition(updatedMCO.Status.Conditions, "Ready") + if status == nil || status.Reason != "Ready" { + t.Errorf("Failed to get correct MCO status, expect Ready") + } + + //Test finalizer + mco.ObjectMeta.DeletionTimestamp = &v1.Time{Time: time.Now()} + mco.ObjectMeta.Finalizers = []string{resFinalizer, "test-finalizerr"} + mco.ObjectMeta.ResourceVersion = updatedMCO.ObjectMeta.ResourceVersion + err = cl.Update(context.TODO(), mco) + if err != nil { + t.Fatalf("Failed to update MultiClusterObservability: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile for finalizer: (%v)", err) + } + +} + +func TestImageReplaceForMCO(t *testing.T) { + var ( + name = "test-monitoring" + namespace = config.GetDefaultNamespace() + version = "2.3.0" + ) + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get work dir: (%v)", err) + } + os.MkdirAll(path.Join(wd, "../../tests"), 0755) + testManifestsPath := path.Join(wd, "../../tests/manifests") + manifestsPath := path.Join(wd, "../../manifests") + os.Setenv("TEMPLATES_PATH", testManifestsPath) + templates.ResetTemplates() + err = os.Symlink(manifestsPath, testManifestsPath) + if err != nil { + t.Fatalf("Failed to create symbollink(%s) to(%s) for the test manifests: (%v)", testManifestsPath, manifestsPath, err) + } + + // A MultiClusterObservability object with metadata and spec. 
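+	// No image tag annotation is set on this CR; the operand images are expected to be overridden
+	// from the MCH image manifests configmap created below.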
+ mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + StorageClass: "gp2", + AlertmanagerStorageSize: "1Gi", + CompactStorageSize: "1Gi", + RuleStorageSize: "1Gi", + ReceiveStorageSize: "1Gi", + StoreStorageSize: "1Gi", + }, + ObservabilityAddonSpec: &mcoshared.ObservabilityAddonSpec{ + EnableMetrics: false, + }, + }, + } + + // Register operator types with the runtime scheme. + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + observatoriumv1alpha1.AddToScheme(s) + routev1.AddToScheme(s) + clusterv1.AddToScheme(s) + addonv1alpha1.AddToScheme(s) + mchv1.SchemeBuilder.AddToScheme(s) + migrationv1alpha1.SchemeBuilder.AddToScheme(s) + + observatoriumAPIsvc := createObservatoriumAPIService(name, namespace) + serverCACerts := newTestCert(config.ServerCACerts, namespace) + clientCACerts := newTestCert(config.ClientCACerts, namespace) + grafanaCert := newTestCert(config.GrafanaCerts, namespace) + serverCert := newTestCert(config.ServerCerts, namespace) + // create the image manifest configmap + testMCHInstance := newMCHInstanceWithVersion(config.GetMCONamespace(), version) + imageManifestsCM := newTestImageManifestsConfigMap(config.GetMCONamespace(), version) + // byo case for the alertmanager route + testAmRouteBYOCaSecret := newTestCert(config.AlertmanagerRouteBYOCAName, namespace) + testAmRouteBYOCertSecret := newTestCert(config.AlertmanagerRouteBYOCERTName, namespace) + clustermgmtAddon := newClusterManagementAddon() + + objs := []runtime.Object{mco, observatoriumAPIsvc, serverCACerts, clientCACerts, grafanaCert, serverCert, + testMCHInstance, imageManifestsCM, testAmRouteBYOCaSecret, testAmRouteBYOCertSecret, clustermgmtAddon} + // Create a fake client to mock API calls. + cl := fake.NewFakeClient(objs...) + + // Create a ReconcileMemcached object with the scheme and fake client. + r := &MultiClusterObservabilityReconciler{Client: cl, Scheme: s, CRDMap: map[string]bool{config.MCHCrdName: true, config.IngressControllerCRD: true}} + config.SetMonitoringCRName(name) + + // Mock request to simulate Reconcile() being called on an event for a watched resource . 
+ req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: config.MCHUpdatedRequestName, + Namespace: config.GetMCONamespace(), + }, + } + + // set the image manifests map for testing + config.SetImageManifests(testImagemanifestsMap) + + // trigger another reconcile for MCH update event + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + //wait for update status + time.Sleep(1 * time.Second) + + expectedDeploymentNames := []string{ + config.GetOperandNamePrefix() + config.Grafana, + config.GetOperandNamePrefix() + config.ObservatoriumOperator, + config.GetOperandNamePrefix() + config.RBACQueryProxy, + } + for _, deployName := range expectedDeploymentNames { + deploy := &appsv1.Deployment{} + err = cl.Get(context.TODO(), types.NamespacedName{ + Name: deployName, + Namespace: namespace, + }, deploy) + if err != nil { + t.Fatalf("Failed to get deployment %s: %v", deployName, err) + } + for _, container := range deploy.Spec.Template.Spec.Containers { + imageKey := strings.ReplaceAll(container.Name, "-", "_") + switch container.Name { + case "oauth-proxy": + // TODO: add oauth-proxy image to image manifests + continue + case "config-reloader": + imageKey = "prometheus-config-reloader" + } + imageValue, exists := testImagemanifestsMap[imageKey] + if !exists { + t.Fatalf("The image key(%s) for the container(%s) doesn't exist in the deployment(%s)", imageKey, container.Name, deployName) + } + if imageValue != container.Image { + t.Fatalf("The image(%s) for the container(%s) in the deployment(%s) should be replaced with the one(%s) in the image manifests", container.Image, container.Name, deployName, imageValue) + } + } + } + + expectedStatefulSetNames := []string{ + config.GetOperandNamePrefix() + config.Alertmanager, + } + for _, statefulName := range expectedStatefulSetNames { + sts := &appsv1.StatefulSet{} + err = cl.Get(context.TODO(), types.NamespacedName{ + Name: statefulName, + Namespace: namespace, + }, sts) + if err != nil { + t.Fatalf("Failed to get statefulset %s: %v", statefulName, err) + } + for _, container := range sts.Spec.Template.Spec.Containers { + imageKey := strings.ReplaceAll(container.Name, "-", "_") + switch container.Name { + case "oauth-proxy", "alertmanager-proxy": + // TODO: add oauth-proxy image to image manifests + continue + case "alertmanager": + imageKey = "prometheus_alertmanager" + case "config-reloader": + imageKey = "prometheus-config-reloader" + } + imageValue, exists := testImagemanifestsMap[imageKey] + if !exists { + t.Fatalf("The image key(%s) for the container(%s) doesn't exist in the statefulset(%s)", imageKey, container.Name, statefulName) + } + if imageValue != container.Image { + t.Fatalf("The image(%s) for the container(%s) in the statefulset(%s) should not replace with the one in the image manifests", imageValue, container.Name, statefulName) + } + } + } + + if err = os.Remove(testManifestsPath); err != nil { + t.Fatalf("Failed to delete symbollink(%s) for the test manifests: (%v)", testManifestsPath, err) + } + os.Remove(path.Join(wd, "../../tests")) + + // stop update status routine + stopStatusUpdate <- struct{}{} + //wait for update status + time.Sleep(1 * time.Second) +} + +func createSecret(key, name, namespace string) *corev1.Secret { + + s3Conf := &config.ObjectStorgeConf{ + Type: "s3", + Config: config.Config{ + Bucket: "bucket", + Endpoint: "endpoint", + Insecure: true, + AccessKey: "access_key", + SecretKey: "secret_key`", + }, + } + configYaml, _ := yaml.Marshal(s3Conf) + + 
configYamlMap := map[string][]byte{} + configYamlMap[key] = configYaml + + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Type: "Opaque", + Data: configYamlMap, + } +} + +func TestCheckObjStorageStatus(t *testing.T) { + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + }, + }, + } + + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + objs := []runtime.Object{mco} + c := fake.NewFakeClient(objs...) + mcoCondition := checkObjStorageStatus(c, mco) + if mcoCondition == nil { + t.Errorf("check s3 conf failed: got %v, expected non-nil", mcoCondition) + } + + err := c.Create(context.TODO(), createSecret("test", "test", config.GetDefaultNamespace())) + if err != nil { + t.Fatalf("Failed to create secret: (%v)", err) + } + + mcoCondition = checkObjStorageStatus(c, mco) + if mcoCondition != nil { + t.Errorf("check s3 conf failed: got %v, expected nil", mcoCondition) + } + + updateSecret := createSecret("error", "test", config.GetDefaultNamespace()) + updateSecret.ObjectMeta.ResourceVersion = "1" + err = c.Update(context.TODO(), updateSecret) + if err != nil { + t.Fatalf("Failed to update secret: (%v)", err) + } + + mcoCondition = checkObjStorageStatus(c, mco) + if mcoCondition == nil { + t.Errorf("check s3 conf failed: got %v, expected no-nil", mcoCondition) + } +} + +func TestHandleStorageSizeChange(t *testing.T) { + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + AlertmanagerStorageSize: "2Gi", + }, + }, + } + + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + objs := []runtime.Object{ + mco, + createStatefulSet(mco.Name, config.GetDefaultNamespace(), "test"), + createPersistentVolumeClaim(mco.Name, config.GetDefaultNamespace(), "test"), + } + c := fake.NewFakeClient(objs...) 
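+	// Reconcile the size change: with isAlertmanagerStorageSizeChanged set, the
+	// reconciler should resize the Alertmanager PVC to the requested 2Gi.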
+ r := &MultiClusterObservabilityReconciler{Client: c, Scheme: s} + isAlertmanagerStorageSizeChanged = true + r.HandleStorageSizeChange(mco) + + pvc := &corev1.PersistentVolumeClaim{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: "test", + Namespace: config.GetDefaultNamespace(), + }, pvc) + + if err == nil { + if !pvc.Spec.Resources.Requests.Storage().Equal(resource.MustParse("2Gi")) { + t.Errorf("update pvc failed: got %v, expected 2Gi", pvc.Spec.Resources.Requests.Storage()) + } + } else { + t.Errorf("update pvc failed: %v", err) + } + +} + +func createStatefulSet(name, namespace, statefulSetName string) *appsv1.StatefulSet { + return &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: statefulSetName, + Namespace: namespace, + Labels: map[string]string{ + "observability.open-cluster-management.io/name": name, + "alertmanager": "observability", + }, + }, + } +} + +func createPersistentVolumeClaim(name, namespace, pvcName string) *corev1.PersistentVolumeClaim { + storage := "gp2" + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + Labels: map[string]string{ + "observability.open-cluster-management.io/name": name, + "alertmanager": "observability", + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + StorageClassName: &storage, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceStorage): resource.MustParse("1Gi"), + }, + }, + }, + } +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go new file mode 100644 index 000000000..766e20a28 --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go @@ -0,0 +1,392 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "context" + "fmt" + "reflect" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +var ( + stopStatusUpdate = make(chan struct{}) + stopCheckReady = make(chan struct{}) + requeueStatusUpdate = make(chan struct{}) + updateStatusIsRunnning = false + updateReadyStatusIsRunnning = false +) + +// Start goroutines to update MCO status +func StartStatusUpdate(c client.Client, instance *mcov1beta2.MultiClusterObservability) { + if !updateStatusIsRunnning { + go func() { + updateStatusIsRunnning = true + // defer close(stopStatusUpdate) + // defer close(requeueStatusUpdate) + for { + select { + case <-stopStatusUpdate: + updateStatusIsRunnning = false + close(stopCheckReady) + log.V(1).Info("status update goroutine is stopped.") + return + case <-requeueStatusUpdate: + log.V(1).Info("status update goroutine is triggered.") + updateStatus(c) + if updateReadyStatusIsRunnning && checkReadyStatus(c, instance) { + log.V(1).Info("send singal to stop status check ready goroutine because MCO status is ready") + stopCheckReady <- struct{}{} + } + } + } + }() + if !updateReadyStatusIsRunnning { + // init the stop ready check channel + stopCheckReady = make(chan struct{}) + go func() { + updateReadyStatusIsRunnning = true + // defer close(stopCheckReady) + for { + select { + case <-stopCheckReady: + updateReadyStatusIsRunnning = false + log.V(1).Info("check status ready goroutine is stopped.") + return + case <-time.After(2 * time.Second): + log.V(1).Info("check status ready goroutine is triggered.") + if checkReadyStatus(c, instance) { + requeueStatusUpdate <- struct{}{} + } + } + } + }() + } + } +} + +// updateStatus override UpdateStatus interface +func updateStatus(c client.Client) { + instance := &mcov1beta2.MultiClusterObservability{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: config.GetMonitoringCRName(), + }, instance) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to get existing mco %s", instance.Name)) + return + } + oldStatus := instance.Status + newStatus := oldStatus.DeepCopy() + updateInstallStatus(&newStatus.Conditions) + updateReadyStatus(&newStatus.Conditions, c, instance) + updateAddonSpecStatus(&newStatus.Conditions, instance) + fillupStatus(&newStatus.Conditions) + instance.Status.Conditions = newStatus.Conditions + if !reflect.DeepEqual(newStatus.Conditions, oldStatus.Conditions) { + err := c.Status().Update(context.TODO(), instance) + if err != nil { + log.Error(err, fmt.Sprintf("failed to update status of mco %s", instance.Name)) + return + } + } + + return +} + +// fillup the status if there is no status and lastTransitionTime in upgrade case +func fillupStatus(conditions *[]mcoshared.Condition) { + for idx, condition := range *conditions { + if condition.Status == "" { + (*conditions)[idx].Status = metav1.ConditionUnknown + } + if condition.LastTransitionTime.IsZero() { + (*conditions)[idx].LastTransitionTime = metav1.NewTime(time.Now()) + } + } +} + 
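+// updateInstallStatus makes sure the Installing condition is present on the status.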
+func updateInstallStatus(conditions *[]mcoshared.Condition) { + setStatusCondition(conditions, *newInstallingCondition()) +} + +func checkReadyStatus(c client.Client, mco *mcov1beta2.MultiClusterObservability) bool { + + if findStatusCondition(mco.Status.Conditions, "Ready") != nil { + return true + } + + objStorageStatus := checkObjStorageStatus(c, mco) + if objStorageStatus != nil { + return false + } + + deployStatus := checkDeployStatus(c, mco) + if deployStatus != nil { + return false + } + + statefulStatus := checkStatefulSetStatus(c, mco) + if statefulStatus != nil { + return false + } + return true +} + +func updateReadyStatus( + conditions *[]mcoshared.Condition, + c client.Client, + mco *mcov1beta2.MultiClusterObservability) { + + if findStatusCondition(*conditions, "Ready") != nil { + return + } + + objStorageStatus := checkObjStorageStatus(c, mco) + if objStorageStatus != nil { + setStatusCondition(conditions, *objStorageStatus) + return + } + + deployStatus := checkDeployStatus(c, mco) + if deployStatus != nil { + setStatusCondition(conditions, *deployStatus) + return + } + + statefulStatus := checkStatefulSetStatus(c, mco) + if statefulStatus != nil { + setStatusCondition(conditions, *statefulStatus) + return + } + + setStatusCondition(conditions, *newReadyCondition()) + removeStatusCondition(conditions, "Failed") +} + +// setStatusCondition sets the corresponding condition in conditions to newCondition. +// conditions must be non-nil. +// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to +// newCondition, LastTransitionTime is set to now if the new status differs from the old status) +// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, +// and newCondition is appended) +func setStatusCondition(conditions *[]mcoshared.Condition, newCondition mcoshared.Condition) { + if conditions == nil { + return + } + existingCondition := findStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + if newCondition.LastTransitionTime.IsZero() { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + if !newCondition.LastTransitionTime.IsZero() { + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } else { + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// removeStatusCondition removes the corresponding conditionType from conditions. +// conditions must be non-nil. +func removeStatusCondition(conditions *[]mcoshared.Condition, conditionType string) { + if conditions == nil { + return + } + newConditions := make([]mcoshared.Condition, 0, len(*conditions)-1) + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +// findStatusCondition finds the conditionType in conditions. 
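+// It returns a pointer into the slice so the caller can update the condition in
+// place, or nil when no condition of the given type is present.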
+func findStatusCondition(conditions []mcoshared.Condition, conditionType string) *mcoshared.Condition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+func updateAddonSpecStatus(
+	conditions *[]mcoshared.Condition,
+	mco *mcov1beta2.MultiClusterObservability) {
+	addonStatus := checkAddonSpecStatus(mco)
+	if addonStatus != nil {
+		setStatusCondition(conditions, *addonStatus)
+	} else {
+		removeStatusCondition(conditions, "MetricsDisabled")
+	}
+}
+
+func getExpectedDeploymentNames() []string {
+	return []string{
+		config.GetOperandNamePrefix() + config.Grafana,
+		config.GetOperandNamePrefix() + config.ObservatoriumAPI,
+		config.GetOperandNamePrefix() + config.ThanosQuery,
+		config.GetOperandNamePrefix() + config.ThanosQueryFrontend,
+		config.GetOperandNamePrefix() + config.ThanosReceiveController,
+		config.GetOperandNamePrefix() + config.ObservatoriumOperator,
+		config.GetOperandNamePrefix() + config.RBACQueryProxy,
+	}
+}
+
+func checkDeployStatus(
+	c client.Client,
+	mco *mcov1beta2.MultiClusterObservability) *mcoshared.Condition {
+	expectedDeploymentNames := getExpectedDeploymentNames()
+	for _, name := range expectedDeploymentNames {
+		found := &appsv1.Deployment{}
+		namespacedName := types.NamespacedName{
+			Name:      name,
+			Namespace: config.GetDefaultNamespace(),
+		}
+		err := c.Get(context.TODO(), namespacedName, found)
+		if err != nil {
+			msg := fmt.Sprintf("Failed to find expected deployment %s", name)
+			return newFailedCondition("DeploymentNotFound", msg)
+		}
+
+		if found.Status.ReadyReplicas != found.Status.Replicas {
+			msg := fmt.Sprintf("Deployment %s is not ready", name)
+			return newFailedCondition("DeploymentNotReady", msg)
+		}
+	}
+
+	return nil
+}
+
+func getExpectedStatefulSetNames() []string {
+	return []string{
+		config.GetOperandNamePrefix() + config.Alertmanager,
+		config.GetOperandNamePrefix() + config.ThanosCompact,
+		config.GetOperandNamePrefix() + config.ThanosReceive,
+		config.GetOperandNamePrefix() + config.ThanosRule,
+		config.GetOperandNamePrefix() + config.ThanosStoreMemcached,
+		config.GetOperandNamePrefix() + config.ThanosStoreShard + "-0",
+	}
+}
+
+func checkStatefulSetStatus(
+	c client.Client,
+	mco *mcov1beta2.MultiClusterObservability) *mcoshared.Condition {
+	expectedStatefulSetNames := getExpectedStatefulSetNames()
+	for _, name := range expectedStatefulSetNames {
+		found := &appsv1.StatefulSet{}
+		namespacedName := types.NamespacedName{
+			Name:      name,
+			Namespace: config.GetDefaultNamespace(),
+		}
+		err := c.Get(context.TODO(), namespacedName, found)
+		if err != nil {
+			msg := fmt.Sprintf("Failed to find expected stateful set %s", name)
+			return newFailedCondition("StatefulSetNotFound", msg)
+		}
+
+		if found.Status.ReadyReplicas != found.Status.Replicas {
+			msg := fmt.Sprintf("StatefulSet %s is not ready", name)
+			return newFailedCondition("StatefulSetNotReady", msg)
+		}
+	}
+
+	return nil
+}
+
+func checkObjStorageStatus(
+	c client.Client,
+	mco *mcov1beta2.MultiClusterObservability) *mcoshared.Condition {
+	objStorageConf := mco.Spec.StorageConfig.MetricObjectStorage
+	secret := &corev1.Secret{}
+	namespacedName := types.NamespacedName{
+		Name:      objStorageConf.Name,
+		Namespace: config.GetDefaultNamespace(),
+	}
+
+	err := c.Get(context.TODO(), namespacedName, secret)
+	if err != nil {
+		return newFailedCondition("ObjectStorageSecretNotFound", err.Error())
+	}
+
+	data, ok := secret.Data[objStorageConf.Key]
+	if !ok {
+		msg := fmt.Sprintf("Failed to find the object storage configuration key from secret %s", secret.Name)
+		return newFailedCondition("ObjectStorageConfInvalid", msg)
+	}
+
+	ok, err = config.CheckObjStorageConf(data)
+	if !ok {
+		return newFailedCondition("ObjectStorageConfInvalid", err.Error())
+	}
+
+	return nil
+}
+
+func checkAddonSpecStatus(mco *mcov1beta2.MultiClusterObservability) *mcoshared.Condition {
+	addonSpec := mco.Spec.ObservabilityAddonSpec
+	if addonSpec != nil && !addonSpec.EnableMetrics {
+		log.Info("Metrics collection is disabled")
+		return newMetricsDisabledCondition()
+	}
+	return nil
+}
+
+func newInstallingCondition() *mcoshared.Condition {
+	return &mcoshared.Condition{
+		Type:    "Installing",
+		Status:  "True",
+		Reason:  "Installing",
+		Message: "Installation is in progress",
+	}
+}
+
+func newReadyCondition() *mcoshared.Condition {
+	return &mcoshared.Condition{
+		Type:    "Ready",
+		Status:  "True",
+		Reason:  "Ready",
+		Message: "Observability components are deployed and running",
+	}
+}
+
+func newFailedCondition(reason string, msg string) *mcoshared.Condition {
+	return &mcoshared.Condition{
+		Type:    "Failed",
+		Status:  "False",
+		Reason:  reason,
+		Message: msg,
+	}
+}
+
+func newMetricsDisabledCondition() *mcoshared.Condition {
+	return &mcoshared.Condition{
+		Type:    "MetricsDisabled",
+		Status:  "True",
+		Reason:  "MetricsDisabled",
+		Message: "Collecting metrics from the managed clusters is disabled",
+	}
+}
diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status_test.go
new file mode 100644
index 000000000..a99d06351
--- /dev/null
+++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status_test.go
@@ -0,0 +1,266 @@
+// Copyright (c) 2021 Red Hat, Inc.
+// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "context" + "reflect" + "testing" + "time" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "gopkg.in/yaml.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFillupStatus(t *testing.T) { + + raw := ` +conditions: +- message: Installation is in progress + reason: Installing + type: Installing +- message: Observability components are deployed and running + reason: Ready + type: Ready +` + status := mcov1beta2.MultiClusterObservabilityStatus{} + err := yaml.Unmarshal([]byte(raw), &status) + if err != nil { + t.Errorf("Failed to unmarshall MultiClusterObservabilityStatus %v", err) + } + newStatus := status.DeepCopy() + fillupStatus(&newStatus.Conditions) + for _, condition := range newStatus.Conditions { + if condition.Status == "" { + t.Fatal("Failed to fillup the status") + } + if condition.LastTransitionTime.IsZero() { + t.Fatal("Failed to fillup the status") + } + } +} + +func TestSetStatusCondition(t *testing.T) { + oneHourBefore := time.Now().Add(-1 * time.Hour) + oneHourAfter := time.Now().Add(1 * time.Hour) + + tests := []struct { + name string + conditions []mcoshared.Condition + toAdd mcoshared.Condition + expected []mcoshared.Condition + }{ + { + name: "should-add", + conditions: []mcoshared.Condition{ + {Type: "first"}, + {Type: "third"}, + }, + toAdd: mcoshared.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + expected: []mcoshared.Condition{ + {Type: "first"}, + {Type: "third"}, + {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + }, + }, + { + name: "use-supplied-time", + conditions: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second", Status: metav1.ConditionFalse}, + {Type: "third"}, + }, + toAdd: mcoshared.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + expected: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + {Type: "third"}, + }, + }, + { + name: "update-fields", + conditions: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}}, + {Type: "third"}, + }, + toAdd: mcoshared.Condition{Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourAfter}, Reason: "reason", Message: "message"}, + expected: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second", Status: metav1.ConditionTrue, LastTransitionTime: metav1.Time{Time: oneHourBefore}, Reason: "reason", Message: "message"}, + {Type: "third"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + setStatusCondition(&test.conditions, 
test.toAdd) + if !reflect.DeepEqual(test.conditions, test.expected) { + t.Error(test.conditions) + } + }) + } +} + +func TestRemoveStatusCondition(t *testing.T) { + tests := []struct { + name string + conditions []mcoshared.Condition + conditionType string + expected []mcoshared.Condition + }{ + { + name: "present", + conditions: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second"}, + {Type: "third"}, + }, + conditionType: "second", + expected: []mcoshared.Condition{ + {Type: "first"}, + {Type: "third"}, + }, + }, + { + name: "not-present", + conditions: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second"}, + {Type: "third"}, + }, + conditionType: "fourth", + expected: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second"}, + {Type: "third"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + removeStatusCondition(&test.conditions, test.conditionType) + if !reflect.DeepEqual(test.conditions, test.expected) { + t.Error(test.conditions) + } + }) + } +} + +func TestFindStatusCondition(t *testing.T) { + tests := []struct { + name string + conditions []mcoshared.Condition + conditionType string + expected *mcoshared.Condition + }{ + { + name: "not-present", + conditions: []mcoshared.Condition{ + {Type: "first"}, + }, + conditionType: "second", + expected: nil, + }, + { + name: "present", + conditions: []mcoshared.Condition{ + {Type: "first"}, + {Type: "second"}, + }, + conditionType: "second", + expected: &mcoshared.Condition{Type: "second"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual := findStatusCondition(test.conditions, test.conditionType) + if !reflect.DeepEqual(actual, test.expected) { + t.Error(actual) + } + }) + } +} + +func TestStartStatusUpdate(t *testing.T) { + mcoconfig.SetMonitoringCRName("observability") + // A MultiClusterObservability object with metadata and spec. + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: mcoconfig.GetMonitoringCRName(), + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + StorageClass: "gp2", + AlertmanagerStorageSize: "1Gi", + CompactStorageSize: "1Gi", + RuleStorageSize: "1Gi", + ReceiveStorageSize: "1Gi", + StoreStorageSize: "1Gi", + }, + ObservabilityAddonSpec: &mcoshared.ObservabilityAddonSpec{ + EnableMetrics: false, + }, + }, + Status: mcov1beta2.MultiClusterObservabilityStatus{ + Conditions: []mcoshared.Condition{}, + }, + } + + // Register operator types with the runtime scheme. + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + + objs := []runtime.Object{mco, createSecret("test", "test", mcoconfig.GetMCONamespace())} + cl := fake.NewFakeClient(objs...) 
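+	// Start the status update goroutines, then trigger one update cycle and give
+	// it a moment to write the resulting conditions back through the fake client.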
+ + StartStatusUpdate(cl, mco) + + requeueStatusUpdate <- struct{}{} + time.Sleep(3 * time.Second) + + instance := &mcov1beta2.MultiClusterObservability{} + _ = cl.Get(context.TODO(), types.NamespacedName{ + Name: mcoconfig.GetMonitoringCRName(), + }, instance) + + if findStatusCondition(instance.Status.Conditions, "Installing") == nil { + t.Fatal("failed to update mco status with Installing") + } + if findStatusCondition(instance.Status.Conditions, "MetricsDisabled") == nil { + t.Fatal("failed to update mco status with MetricsDisabled") + } + + instance.Spec.ObservabilityAddonSpec.EnableMetrics = true + err := cl.Update(context.TODO(), instance) + if err != nil { + t.Fatalf("Failed to update MultiClusterObservability: (%v)", err) + } + requeueStatusUpdate <- struct{}{} + time.Sleep(3 * time.Second) + + instance = &mcov1beta2.MultiClusterObservability{} + _ = cl.Get(context.TODO(), types.NamespacedName{ + Name: mcoconfig.GetMonitoringCRName(), + }, instance) + + if findStatusCondition(instance.Status.Conditions, "MetricsDisabled") != nil { + t.Fatal("failed to update mco status to remove MetricsDisabled") + } +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go new file mode 100644 index 000000000..0687df844 --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go @@ -0,0 +1,687 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "bytes" + "context" + "fmt" + "os" + "time" + + routev1 "github.com/openshift/api/route/v1" + obsv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/yaml" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +const ( + obsAPIGateway = "observatorium-api" + + readOnlyRoleName = "read-only-metrics" + writeOnlyRoleName = "write-only-metrics" +) + +// GenerateObservatoriumCR returns Observatorium cr defined in MultiClusterObservability +func GenerateObservatoriumCR( + cl client.Client, scheme *runtime.Scheme, + mco *mcov1beta2.MultiClusterObservability) (*ctrl.Result, error) { + + labels := map[string]string{ + "app": mcoconfig.GetOperandName(mcoconfig.Observatorium), + } + + storageClassSelected, err := getStorageClass(mco, cl) + if err != nil { + return &ctrl.Result{}, err + } + + log.Info("storageClassSelected", "storageClassSelected", storageClassSelected) + + observatoriumCR := &obsv1alpha1.Observatorium{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoconfig.GetOperandName(mcoconfig.Observatorium), + Namespace: 
mcoconfig.GetDefaultNamespace(),
+			Labels:    labels,
+		},
+		Spec: *newDefaultObservatoriumSpec(mco, storageClassSelected),
+	}
+
+	// Set MultiClusterObservability instance as the owner and controller
+	if err := controllerutil.SetControllerReference(mco, observatoriumCR, scheme); err != nil {
+		return &ctrl.Result{}, err
+	}
+
+	// Check if this Observatorium CR already exists
+	observatoriumCRFound := &obsv1alpha1.Observatorium{}
+	err = cl.Get(
+		context.TODO(),
+		types.NamespacedName{
+			Name:      observatoriumCR.Name,
+			Namespace: observatoriumCR.Namespace,
+		},
+		observatoriumCRFound,
+	)
+
+	if err != nil && errors.IsNotFound(err) {
+		log.Info("Creating a new observatorium CR",
+			"observatorium", observatoriumCR.Name,
+		)
+		err = cl.Create(context.TODO(), observatoriumCR)
+		if err != nil {
+			return &ctrl.Result{}, err
+		}
+		return nil, nil
+	} else if err != nil {
+		return &ctrl.Result{}, err
+	}
+
+	oldSpec := observatoriumCRFound.Spec
+	newSpec := observatoriumCR.Spec
+	oldSpecBytes, _ := yaml.Marshal(oldSpec)
+	newSpecBytes, _ := yaml.Marshal(newSpec)
+	if bytes.Equal(newSpecBytes, oldSpecBytes) {
+		return nil, nil
+	}
+
+	// keep the tenant id unchanged
+	for i, newTenant := range newSpec.API.Tenants {
+		for _, oldTenant := range oldSpec.API.Tenants {
+			updateTenantID(&newSpec, newTenant, oldTenant, i)
+		}
+	}
+
+	log.Info("Updating observatorium CR",
+		"observatorium", observatoriumCR.Name,
+	)
+
+	newObj := observatoriumCRFound.DeepCopy()
+	newObj.Spec = newSpec
+	err = cl.Update(context.TODO(), newObj)
+	if err != nil {
+		log.Error(err, fmt.Sprintf("Failed to update observatorium CR %s", observatoriumCR.Name))
+		// requeue after a short delay on update failure to avoid update conflicts
+		return &ctrl.Result{RequeueAfter: time.Second * 3}, err
+	}
+
+	// delete the redundant store shard statefulsets in the scale-in scenario
+	err = deleteStoreSts(cl, observatoriumCR.Name,
+		*oldSpec.Thanos.Store.Shards, *newSpec.Thanos.Store.Shards)
+	if err != nil {
+		return &ctrl.Result{}, err
+	}
+
+	return nil, nil
+}
+
+func updateTenantID(
+	newSpec *obsv1alpha1.ObservatoriumSpec,
+	newTenant obsv1alpha1.APITenant,
+	oldTenant obsv1alpha1.APITenant,
+	idx int) {
+
+	if oldTenant.Name == newTenant.Name && newTenant.ID == oldTenant.ID {
+		return
+	}
+
+	newSpec.API.Tenants[idx].ID = oldTenant.ID
+	for j, hashring := range newSpec.Hashrings {
+		if util.Contains(hashring.Tenants, newTenant.ID) {
+			newSpec.Hashrings[j].Tenants = util.Remove(newSpec.Hashrings[j].Tenants, newTenant.ID)
+			newSpec.Hashrings[j].Tenants = append(newSpec.Hashrings[0].Tenants, oldTenant.ID)
+		}
+	}
+}
+
+// GenerateAPIGatewayRoute creates a Route to expose the observatorium api gateway if it does not exist yet
+func GenerateAPIGatewayRoute(
+	runclient client.Client, scheme *runtime.Scheme,
+	mco *mcov1beta2.MultiClusterObservability) (*ctrl.Result, error) {
+
+	apiGateway := &routev1.Route{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      obsAPIGateway,
+			Namespace: mcoconfig.GetDefaultNamespace(),
+		},
+		Spec: routev1.RouteSpec{
+			Port: &routev1.RoutePort{
+				TargetPort: intstr.FromString("public"),
+			},
+			To: routev1.RouteTargetReference{
+				Kind: "Service",
+				Name: mcoconfig.GetOperandNamePrefix() + "observatorium-api",
+			},
+			TLS: &routev1.TLSConfig{
+				Termination:                   routev1.TLSTerminationPassthrough,
+				InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyNone,
+			},
+		},
+	}
+
+	// Set MultiClusterObservability instance as the owner and controller
+	if err := controllerutil.SetControllerReference(mco, apiGateway, scheme); err != nil {
+		return &ctrl.Result{}, err
+	}
+
+	err := runclient.Get(
+		context.TODO(),
+
types.NamespacedName{Name: apiGateway.Name, Namespace: apiGateway.Namespace}, + &routev1.Route{}) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating a new route to expose observatorium api", + "apiGateway.Namespace", apiGateway.Namespace, + "apiGateway.Name", apiGateway.Name, + ) + err = runclient.Create(context.TODO(), apiGateway) + if err != nil { + return &ctrl.Result{}, err + } + } + + return nil, nil +} + +func newDefaultObservatoriumSpec(mco *mcov1beta2.MultiClusterObservability, + scSelected string) *obsv1alpha1.ObservatoriumSpec { + + obs := &obsv1alpha1.ObservatoriumSpec{} + obs.SecurityContext = &v1.SecurityContext{} + obs.PullSecret = mcoconfig.GetImagePullSecret(mco.Spec) + obs.NodeSelector = mco.Spec.NodeSelector + obs.Tolerations = mco.Spec.Tolerations + obs.API = newAPISpec(mco) + obs.Thanos = newThanosSpec(mco, scSelected) + if util.ProxyEnvVarsAreSet() { + obs.EnvVars = newEnvVars() + } + + obs.Hashrings = []*obsv1alpha1.Hashring{ + {Hashring: "default", Tenants: []string{mcoconfig.GetTenantUID()}}, + } + + obs.ObjectStorageConfig.Thanos = &obsv1alpha1.ThanosObjectStorageConfigSpec{} + if mco.Spec.StorageConfig != nil && mco.Spec.StorageConfig.MetricObjectStorage != nil { + objStorageConf := mco.Spec.StorageConfig.MetricObjectStorage + obs.ObjectStorageConfig.Thanos.Name = objStorageConf.Name + obs.ObjectStorageConfig.Thanos.Key = objStorageConf.Key + } + return obs +} + +// return proxy variables +// OLM set these environment variables as a unit +func newEnvVars() map[string]string { + return map[string]string{ + "HTTP_PROXY": os.Getenv("HTTP_PROXY"), + "HTTPS_PROXY": os.Getenv("HTTPS_PROXY"), + "NO_PROXY": os.Getenv("NO_PROXY"), + } +} + +func newAPIRBAC() obsv1alpha1.APIRBAC { + return obsv1alpha1.APIRBAC{ + Roles: []obsv1alpha1.RBACRole{ + { + Name: readOnlyRoleName, + Resources: []string{ + "metrics", + }, + Permissions: []obsv1alpha1.Permission{ + obsv1alpha1.Read, + }, + Tenants: []string{ + mcoconfig.GetDefaultTenantName(), + }, + }, + { + Name: writeOnlyRoleName, + Resources: []string{ + "metrics", + }, + Permissions: []obsv1alpha1.Permission{ + obsv1alpha1.Write, + }, + Tenants: []string{ + mcoconfig.GetDefaultTenantName(), + }, + }, + }, + RoleBindings: []obsv1alpha1.RBACRoleBinding{ + { + Name: readOnlyRoleName, + Roles: []string{ + readOnlyRoleName, + }, + Subjects: []obsv1alpha1.Subject{ + { + Name: config.GrafanaCN, + Kind: obsv1alpha1.User, + }, + }, + }, + { + Name: writeOnlyRoleName, + Roles: []string{ + writeOnlyRoleName, + }, + Subjects: []obsv1alpha1.Subject{ + { + Name: config.ManagedClusterOU, + Kind: obsv1alpha1.Group, + }, + }, + }, + }, + } +} + +func newAPITenants() []obsv1alpha1.APITenant { + return []obsv1alpha1.APITenant{ + { + Name: mcoconfig.GetDefaultTenantName(), + ID: mcoconfig.GetTenantUID(), + MTLS: &obsv1alpha1.TenantMTLS{ + SecretName: config.ClientCACerts, + CAKey: "tls.crt", + }, + }, + } +} + +func newAPITLS() obsv1alpha1.TLS { + return obsv1alpha1.TLS{ + SecretName: config.ServerCerts, + CertKey: "tls.crt", + KeyKey: "tls.key", + CAKey: "ca.crt", + ServerName: config.ServerCertCN, + } +} + +func newAPISpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.APISpec { + apiSpec := obsv1alpha1.APISpec{} + apiSpec.RBAC = newAPIRBAC() + apiSpec.Tenants = newAPITenants() + apiSpec.TLS = newAPITLS() + apiSpec.Replicas = mcoconfig.GetReplicas(mcoconfig.ObservatoriumAPI, mco.Spec.AdvancedConfig) + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + apiSpec.Resources = 
mcoconfig.GetResources(config.ObservatoriumAPI, mco.Spec.AdvancedConfig) + } + //set the default observatorium components' image + apiSpec.Image = mcoconfig.DefaultImgRepository + "/" + mcoconfig.ObservatoriumAPIImgName + + ":" + mcoconfig.DefaultImgTagSuffix + replace, image := mcoconfig.ReplaceImage(mco.Annotations, apiSpec.Image, mcoconfig.ObservatoriumAPIImgName) + if replace { + apiSpec.Image = image + } + apiSpec.ServiceMonitor = true + return apiSpec +} + +func newReceiversSpec( + mco *mcov1beta2.MultiClusterObservability, + scSelected string) obsv1alpha1.ReceiversSpec { + receSpec := obsv1alpha1.ReceiversSpec{} + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.RetentionInLocal != "" { + receSpec.Retention = mco.Spec.AdvancedConfig.RetentionConfig.RetentionInLocal + } else { + receSpec.Retention = mcoconfig.RetentionInLocal + } + + receSpec.Replicas = mcoconfig.GetReplicas(mcoconfig.ThanosReceive, mco.Spec.AdvancedConfig) + if *receSpec.Replicas < 3 { + receSpec.ReplicationFactor = receSpec.Replicas + } else { + receSpec.ReplicationFactor = &config.Replicas3 + } + + receSpec.ServiceMonitor = true + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + receSpec.Resources = mcoconfig.GetResources(config.ThanosReceive, mco.Spec.AdvancedConfig) + } + receSpec.VolumeClaimTemplate = newVolumeClaimTemplate( + mco.Spec.StorageConfig.ReceiveStorageSize, + scSelected) + + return receSpec +} + +func newRuleSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) obsv1alpha1.RuleSpec { + ruleSpec := obsv1alpha1.RuleSpec{} + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.BlockDuration != "" { + ruleSpec.BlockDuration = mco.Spec.AdvancedConfig.RetentionConfig.BlockDuration + } else { + ruleSpec.BlockDuration = mcoconfig.BlockDuration + } + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.RetentionInLocal != "" { + ruleSpec.Retention = mco.Spec.AdvancedConfig.RetentionConfig.RetentionInLocal + } else { + ruleSpec.Retention = mcoconfig.RetentionInLocal + } + + if mco.Spec.AdvancedConfig != nil && + mco.Spec.AdvancedConfig.Rule != nil && + len(mco.Spec.AdvancedConfig.Rule.EvalInterval) > 0 { + ruleSpec.EvalInterval = mco.Spec.AdvancedConfig.Rule.EvalInterval + } else { + ruleSpec.EvalInterval = fmt.Sprintf("%ds", mco.Spec.ObservabilityAddonSpec.Interval) + } + ruleSpec.Replicas = mcoconfig.GetReplicas(mcoconfig.ThanosRule, mco.Spec.AdvancedConfig) + + ruleSpec.ServiceMonitor = true + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + ruleSpec.Resources = mcoconfig.GetResources(config.ThanosRule, mco.Spec.AdvancedConfig) + ruleSpec.ReloaderResources = v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse(mcoconfig.ThanosRuleReloaderCPURequets), + v1.ResourceName(v1.ResourceMemory): resource.MustParse(mcoconfig.ThanosRuleReloaderMemoryRequets), + }, + } + } + + ruleSpec.ReloaderImage = mcoconfig.ConfigmapReloaderImgRepo + "/" + + mcoconfig.ConfigmapReloaderImgName + ":" + mcoconfig.ConfigmapReloaderImgTagSuffix + found, reloaderImage := mcoconfig.ReplaceImage(mco.Annotations, + mcoconfig.ConfigmapReloaderImgRepo, mcoconfig.ConfigmapReloaderKey) + if found { + ruleSpec.ReloaderImage = reloaderImage + } + + ruleSpec.VolumeClaimTemplate = newVolumeClaimTemplate( + 
mco.Spec.StorageConfig.RuleStorageSize, + scSelected) + + //configure alertmanager in ruler + //ruleSpec.AlertmanagerURLs = []string{mcoconfig.AlertmanagerURL} + ruleSpec.AlertmanagerConfigFile = obsv1alpha1.AlertmanagerConfigFile{ + Name: mcoconfig.AlertmanagersDefaultConfigMapName, + Key: mcoconfig.AlertmanagersDefaultConfigFileKey, + } + + ruleSpec.ExtraVolumeMounts = []obsv1alpha1.VolumeMount{ + { + Type: obsv1alpha1.VolumeMountTypeConfigMap, + MountPath: mcoconfig.AlertmanagersDefaultCaBundleMountPath, + Name: mcoconfig.AlertmanagersDefaultCaBundleName, + Key: mcoconfig.AlertmanagersDefaultCaBundleKey, + }, + } + + ruleSpec.RulesConfig = []obsv1alpha1.RuleConfig{ + { + Name: mcoconfig.AlertRuleDefaultConfigMapName, + Key: mcoconfig.AlertRuleDefaultFileKey, + }, + } + + if mcoconfig.HasCustomRuleConfigMap() { + customRuleConfig := []obsv1alpha1.RuleConfig{ + { + Name: mcoconfig.AlertRuleCustomConfigMapName, + Key: mcoconfig.AlertRuleCustomFileKey, + }, + } + ruleSpec.RulesConfig = append(ruleSpec.RulesConfig, customRuleConfig...) + } else { + ruleSpec.RulesConfig = []obsv1alpha1.RuleConfig{ + { + Name: mcoconfig.AlertRuleDefaultConfigMapName, + Key: mcoconfig.AlertRuleDefaultFileKey, + }, + } + } + + return ruleSpec +} + +func newStoreSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) obsv1alpha1.StoreSpec { + storeSpec := obsv1alpha1.StoreSpec{} + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + storeSpec.Resources = mcoconfig.GetResources(config.ThanosStoreShard, mco.Spec.AdvancedConfig) + } + + storeSpec.VolumeClaimTemplate = newVolumeClaimTemplate( + mco.Spec.StorageConfig.StoreStorageSize, + scSelected) + + storeSpec.Shards = mcoconfig.GetReplicas(mcoconfig.ThanosStoreShard, mco.Spec.AdvancedConfig) + storeSpec.ServiceMonitor = true + storeSpec.Cache = newMemCacheSpec(mcoconfig.ThanosStoreMemcached, mco) + + return storeSpec +} + +func newMemCacheSpec(component string, mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.MemCacheSpec { + var cacheConfig *mcov1beta2.CacheConfig + if mco.Spec.AdvancedConfig != nil { + if component == mcoconfig.ThanosStoreMemcached { + cacheConfig = mco.Spec.AdvancedConfig.StoreMemcached + } else { + cacheConfig = mco.Spec.AdvancedConfig.QueryFrontendMemcached + } + } + memCacheSpec := obsv1alpha1.MemCacheSpec{} + memCacheSpec.Image = mcoconfig.MemcachedImgRepo + "/" + + mcoconfig.MemcachedImgName + ":" + mcoconfig.MemcachedImgTag + memCacheSpec.Version = mcoconfig.MemcachedImgTag + memCacheSpec.Replicas = mcoconfig.GetReplicas(component, mco.Spec.AdvancedConfig) + + memCacheSpec.ServiceMonitor = true + memCacheSpec.ExporterImage = mcoconfig.MemcachedExporterImgRepo + "/" + + mcoconfig.MemcachedExporterImgName + ":" + mcoconfig.MemcachedExporterImgTag + memCacheSpec.ExporterVersion = mcoconfig.MemcachedExporterImgTag + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + memCacheSpec.Resources = mcoconfig.GetResources(component, mco.Spec.AdvancedConfig) + memCacheSpec.ExporterResources = mcoconfig.GetResources(mcoconfig.MemcachedExporter, mco.Spec.AdvancedConfig) + } + + found, image := mcoconfig.ReplaceImage(mco.Annotations, memCacheSpec.Image, mcoconfig.MemcachedImgName) + if found { + memCacheSpec.Image = image + } + + found, image = mcoconfig.ReplaceImage(mco.Annotations, memCacheSpec.ExporterImage, mcoconfig.MemcachedExporterKey) + if found { + memCacheSpec.ExporterImage = image + } + if cacheConfig != nil && cacheConfig.MemoryLimitMB != nil { + memCacheSpec.MemoryLimitMB = cacheConfig.MemoryLimitMB 
+ } else { + memCacheSpec.MemoryLimitMB = &mcoconfig.MemoryLimitMB + } + if cacheConfig != nil && cacheConfig.ConnectionLimit != nil { + memCacheSpec.ConnectionLimit = cacheConfig.ConnectionLimit + } else { + memCacheSpec.ConnectionLimit = &mcoconfig.ConnectionLimit + } + if cacheConfig != nil && cacheConfig.MaxItemSize != "" { + memCacheSpec.MaxItemSize = cacheConfig.MaxItemSize + } else { + memCacheSpec.MaxItemSize = mcoconfig.MaxItemSize + } + + return memCacheSpec +} + +func newThanosSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) obsv1alpha1.ThanosSpec { + thanosSpec := obsv1alpha1.ThanosSpec{} + thanosSpec.Image = mcoconfig.DefaultImgRepository + "/" + mcoconfig.ThanosImgName + + ":" + mcoconfig.DefaultImgTagSuffix + + thanosSpec.Compact = newCompactSpec(mco, scSelected) + thanosSpec.Receivers = newReceiversSpec(mco, scSelected) + thanosSpec.Rule = newRuleSpec(mco, scSelected) + thanosSpec.Store = newStoreSpec(mco, scSelected) + thanosSpec.ReceiveController = newReceiverControllerSpec(mco) + thanosSpec.Query = newQuerySpec(mco) + thanosSpec.QueryFrontend = newQueryFrontendSpec(mco) + + replace, image := mcoconfig.ReplaceImage(mco.Annotations, thanosSpec.Image, mcoconfig.ThanosImgName) + if replace { + thanosSpec.Image = image + } + return thanosSpec +} + +func newQueryFrontendSpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.QueryFrontendSpec { + queryFrontendSpec := obsv1alpha1.QueryFrontendSpec{} + queryFrontendSpec.Replicas = mcoconfig.GetReplicas(mcoconfig.ThanosQueryFrontend, mco.Spec.AdvancedConfig) + queryFrontendSpec.ServiceMonitor = true + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + queryFrontendSpec.Resources = mcoconfig.GetResources(config.ThanosQueryFrontend, mco.Spec.AdvancedConfig) + } + queryFrontendSpec.Cache = newMemCacheSpec(mcoconfig.ThanosQueryFrontendMemcached, mco) + return queryFrontendSpec +} + +func newQuerySpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.QuerySpec { + querySpec := obsv1alpha1.QuerySpec{} + querySpec.Replicas = mcoconfig.GetReplicas(mcoconfig.ThanosQuery, mco.Spec.AdvancedConfig) + querySpec.ServiceMonitor = true + // only set lookback-delta when the scrape interval * 2 is larger than 5 minute, + // otherwise default value(5m) will be used. 
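+	// (Interval is expressed in seconds, so the 300 threshold corresponds to 5m.)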
+ if mco.Spec.ObservabilityAddonSpec.Interval*2 > 300 { + querySpec.LookbackDelta = fmt.Sprintf("%ds", mco.Spec.ObservabilityAddonSpec.Interval*2) + } + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + querySpec.Resources = mcoconfig.GetResources(config.ThanosQuery, mco.Spec.AdvancedConfig) + } + return querySpec +} + +func newReceiverControllerSpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.ReceiveControllerSpec { + receiveControllerSpec := obsv1alpha1.ReceiveControllerSpec{} + receiveControllerSpec.Image = mcoconfig.ObservatoriumImgRepo + "/" + + mcoconfig.ThanosReceiveControllerImgName + + ":" + mcoconfig.ThanosReceiveControllerImgTag + receiveControllerSpec.ServiceMonitor = true + receiveControllerSpec.Version = mcoconfig.ThanosReceiveControllerImgTag + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + receiveControllerSpec.Resources = v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceCPU): resource.MustParse(mcoconfig.ObservatoriumReceiveControllerCPURequets), + v1.ResourceName(v1.ResourceMemory): resource.MustParse(mcoconfig.ObservatoriumReceiveControllerMemoryRequets), + }, + } + } + replace, image := mcoconfig.ReplaceImage(mco.Annotations, receiveControllerSpec.Image, + mcoconfig.ThanosReceiveControllerKey) + if replace { + receiveControllerSpec.Image = image + } + return receiveControllerSpec +} + +func newCompactSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) obsv1alpha1.CompactSpec { + compactSpec := obsv1alpha1.CompactSpec{} + //Compactor, generally, does not need to be highly available. + //Compactions are needed from time to time, only when new blocks appear. + compactSpec.Replicas = &mcoconfig.Replicas1 + if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { + compactSpec.Resources = mcoconfig.GetResources(config.ThanosCompact, mco.Spec.AdvancedConfig) + } + compactSpec.ServiceMonitor = true + compactSpec.EnableDownsampling = mco.Spec.EnableDownsampling + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.DeleteDelay != "" { + compactSpec.DeleteDelay = mco.Spec.AdvancedConfig.RetentionConfig.DeleteDelay + } else { + compactSpec.DeleteDelay = mcoconfig.DeleteDelay + } + + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.RetentionResolutionRaw != "" { + compactSpec.RetentionResolutionRaw = mco.Spec.AdvancedConfig.RetentionConfig.RetentionResolutionRaw + } else { + compactSpec.RetentionResolutionRaw = mcoconfig.RetentionResolutionRaw + } + + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.RetentionResolution5m != "" { + compactSpec.RetentionResolution5m = mco.Spec.AdvancedConfig.RetentionConfig.RetentionResolution5m + } else { + compactSpec.RetentionResolution5m = mcoconfig.RetentionResolution5m + } + + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.RetentionConfig != nil && + mco.Spec.AdvancedConfig.RetentionConfig.RetentionResolution1h != "" { + compactSpec.RetentionResolution1h = mco.Spec.AdvancedConfig.RetentionConfig.RetentionResolution1h + } else { + compactSpec.RetentionResolution1h = mcoconfig.RetentionResolution1h + } + + compactSpec.VolumeClaimTemplate = newVolumeClaimTemplate( + mco.Spec.StorageConfig.CompactStorageSize, + scSelected) + + return compactSpec +} + +func newVolumeClaimTemplate(size string, storageClass 
string) obsv1alpha1.VolumeClaimTemplate { + vct := obsv1alpha1.VolumeClaimTemplate{} + vct.Spec = v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + StorageClassName: &storageClass, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse(size), + }, + }, + } + return vct +} + +func mergeVolumeClaimTemplate(oldVolumn, + newVolumn obsv1alpha1.VolumeClaimTemplate) obsv1alpha1.VolumeClaimTemplate { + requestRes := newVolumn.Spec.Resources.Requests + limitRes := newVolumn.Spec.Resources.Limits + if requestRes != nil { + oldVolumn.Spec.Resources.Requests[v1.ResourceStorage] = requestRes[v1.ResourceStorage] + } + if limitRes != nil { + oldVolumn.Spec.Resources.Limits[v1.ResourceStorage] = limitRes[v1.ResourceStorage] + } + return oldVolumn +} + +func deleteStoreSts(cl client.Client, name string, oldNum int32, newNum int32) error { + if oldNum > newNum { + for i := newNum; i < oldNum; i++ { + stsName := fmt.Sprintf("%s-thanos-store-shard-%d", name, i) + found := &appsv1.StatefulSet{} + err := cl.Get(context.TODO(), types.NamespacedName{Name: stsName, Namespace: mcoconfig.GetDefaultNamespace()}, found) + if err != nil { + if !errors.IsNotFound(err) { + log.Error(err, "Failed to get statefulset", "name", stsName) + return err + } + } else { + err = cl.Delete(context.TODO(), found) + if err != nil { + log.Error(err, "Failed to delete statefulset", "name", stsName) + return err + } + } + } + } + return nil +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go new file mode 100644 index 000000000..92f879642 --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go @@ -0,0 +1,172 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "bytes" + "context" + "testing" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/yaml" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + observatoriumv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" +) + +var ( + storageClassName = "" +) + +func TestNewVolumeClaimTemplate(t *testing.T) { + vct := newVolumeClaimTemplate("10Gi", "test") + if vct.Spec.AccessModes[0] != v1.ReadWriteOnce || + vct.Spec.Resources.Requests[v1.ResourceStorage] != resource.MustParse("10Gi") { + t.Errorf("Failed to newVolumeClaimTemplate") + } +} + +func TestNewDefaultObservatoriumSpec(t *testing.T) { + statefulSetSize := "1Gi" + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + mcoconfig.AnnotationKeyImageRepository: "quay.io:443/acm-d", + mcoconfig.AnnotationKeyImageTagSuffix: "tag", + }, + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "key", + Name: "name", + }, + StorageClass: storageClassName, + AlertmanagerStorageSize: "1Gi", + CompactStorageSize: "1Gi", + RuleStorageSize: "1Gi", + ReceiveStorageSize: "1Gi", + StoreStorageSize: "1Gi", + }, + ObservabilityAddonSpec: &mcoshared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 300, + }, + }, + } + + obs := newDefaultObservatoriumSpec(mco, storageClassName) + + receiversStorage := obs.Thanos.Receivers.VolumeClaimTemplate.Spec.Resources.Requests["storage"] + ruleStorage := obs.Thanos.Rule.VolumeClaimTemplate.Spec.Resources.Requests["storage"] + storeStorage := obs.Thanos.Store.VolumeClaimTemplate.Spec.Resources.Requests["storage"] + compactStorage := obs.Thanos.Compact.VolumeClaimTemplate.Spec.Resources.Requests["storage"] + obs = newDefaultObservatoriumSpec(mco, storageClassName) + if *obs.Thanos.Receivers.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || + *obs.Thanos.Rule.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || + *obs.Thanos.Store.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || + *obs.Thanos.Compact.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || + receiversStorage.String() != statefulSetSize || + ruleStorage.String() != statefulSetSize || + storeStorage.String() != statefulSetSize || + compactStorage.String() != statefulSetSize || + obs.ObjectStorageConfig.Thanos.Key != "key" || + obs.ObjectStorageConfig.Thanos.Name != "name" || + obs.Thanos.Query.LookbackDelta != "600s" { + t.Errorf("Failed to newDefaultObservatorium") + } +} + +func TestMergeVolumeClaimTemplate(t *testing.T) { + vct1 := newVolumeClaimTemplate("1Gi", "test") + vct3 := newVolumeClaimTemplate("3Gi", "test") + mergeVolumeClaimTemplate(vct1, vct3) + if 
vct1.Spec.Resources.Requests[v1.ResourceStorage] != resource.MustParse("3Gi") { + t.Errorf("Failed to merge %v to %v", vct3, vct1) + } +} + +func TestNoUpdateObservatoriumCR(t *testing.T) { + var ( + namespace = mcoconfig.GetDefaultNamespace() + ) + + // A MultiClusterObservability object with metadata and spec. + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: mcoconfig.GetDefaultCRName(), + Annotations: map[string]string{ + mcoconfig.AnnotationKeyImageTagSuffix: "tag", + }, + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + StorageClass: storageClassName, + AlertmanagerStorageSize: "1Gi", + CompactStorageSize: "1Gi", + RuleStorageSize: "1Gi", + ReceiveStorageSize: "1Gi", + StoreStorageSize: "1Gi", + }, + ObservabilityAddonSpec: &mcoshared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 300, + }, + }, + } + // Register operator types with the runtime scheme. + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + observatoriumv1alpha1.AddToScheme(s) + + objs := []runtime.Object{mco} + // Create a fake client to mock API calls. + cl := fake.NewFakeClient(objs...) + mcoconfig.SetOperandNames(cl) + + _, err := GenerateObservatoriumCR(cl, s, mco) + if err != nil { + t.Errorf("Failed to create observatorium due to %v", err) + } + + // Check if this Observatorium CR already exists + observatoriumCRFound := &observatoriumv1alpha1.Observatorium{} + cl.Get( + context.TODO(), + types.NamespacedName{ + Name: mcoconfig.GetDefaultCRName(), + Namespace: namespace, + }, + observatoriumCRFound, + ) + + oldSpec := observatoriumCRFound.Spec + newSpec := newDefaultObservatoriumSpec(mco, storageClassName) + oldSpecBytes, _ := yaml.Marshal(oldSpec) + newSpecBytes, _ := yaml.Marshal(newSpec) + + if res := bytes.Compare(newSpecBytes, oldSpecBytes); res != 0 { + t.Errorf("%v should be equal to %v", string(oldSpecBytes), string(newSpecBytes)) + } + + _, err = GenerateObservatoriumCR(cl, s, mco) + if err != nil { + t.Errorf("Failed to update observatorium due to %v", err) + } +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go new file mode 100644 index 000000000..3aba5c82e --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go @@ -0,0 +1,96 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "context" + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +var ( + storageVersionMigrationPrefix = "storage-version-migration" +) + +// createOrUpdateObservabilityStorageVersionMigrationResource create or update the StorageVersionMigration resource +func createOrUpdateObservabilityStorageVersionMigrationResource(client client.Client, scheme *runtime.Scheme, + mco *mcov1beta2.MultiClusterObservability) error { + storageVersionMigrationName := storageVersionMigrationPrefix + if mco != nil { + storageVersionMigrationName += mco.GetName() + } + storageVersionMigration := &migrationv1alpha1.StorageVersionMigration{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageVersionMigrationName, + }, + Spec: migrationv1alpha1.StorageVersionMigrationSpec{ + Resource: migrationv1alpha1.GroupVersionResource{ + Group: mcov1beta2.GroupVersion.Group, + Version: mcov1beta2.GroupVersion.Version, + Resource: config.MCORsName, + }, + }, + } + + found := &migrationv1alpha1.StorageVersionMigration{} + err := client.Get(context.TODO(), types.NamespacedName{Name: storageVersionMigrationName}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating StorageVersionMigration", "name", storageVersionMigrationName) + err = client.Create(context.TODO(), storageVersionMigration) + if err != nil { + log.Error(err, "Failed to create StorageVersionMigration", "name", storageVersionMigrationName) + return err + } + return nil + } else if err != nil { + log.Error(err, "Failed to check StorageVersionMigration", "name", storageVersionMigrationName) + return err + } + + if !reflect.DeepEqual(found.Spec, storageVersionMigration.Spec) { + log.Info("Updating StorageVersionMigration", "name", storageVersionMigrationName) + storageVersionMigration.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = client.Update(context.TODO(), storageVersionMigration) + if err != nil { + log.Error(err, "Failed to update StorageVersionMigration", "name", storageVersionMigrationName) + return err + } + return nil + } + + log.Info("StorageVersionMigration already existed/unchanged", "name", storageVersionMigrationName) + return nil +} + +// cleanObservabilityStorageVersionMigrationResource delete the StorageVersionMigration source if found +func cleanObservabilityStorageVersionMigrationResource(client client.Client, mco *mcov1beta2.MultiClusterObservability) error { + storageVersionMigrationName := storageVersionMigrationPrefix + if mco != nil { + storageVersionMigrationName += mco.GetName() + } + found := &migrationv1alpha1.StorageVersionMigration{} + err := client.Get(context.TODO(), types.NamespacedName{Name: storageVersionMigrationName}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("StorageVersionMigration doesn't exist", "name", storageVersionMigrationName) + } else if err != nil { + log.Error(err, "Failed to check StorageVersionMigration", "name", storageVersionMigrationName) + return err + } else { 
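+		// the migration resource still exists, so delete it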
+ err = client.Delete(context.TODO(), found) + if err != nil { + log.Error(err, "Failed to delete StorageVersionMigration", "name", storageVersionMigrationName) + return err + } + } + return nil +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go new file mode 100644 index 000000000..e8d3e0fbc --- /dev/null +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go @@ -0,0 +1,90 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package multiclusterobservability + +import ( + "context" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +func TestCreateOrUpdateObservabilityStorageVersionMigrationResource(t *testing.T) { + var ( + name = "observability" + namespace = mcoconfig.GetDefaultNamespace() + ) + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, + Spec: mcov1beta2.MultiClusterObservabilitySpec{}, + } + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + migrationv1alpha1.SchemeBuilder.AddToScheme(s) + + c := fake.NewFakeClient() + + // test scenario of creating StorageVersionMigration + err := createOrUpdateObservabilityStorageVersionMigrationResource(c, s, mco) + if err != nil { + t.Fatalf("createOrUpdateObservabilityStorageVersionMigrationResource: (%v)", err) + } + + // Test scenario in which StorageVersionMigration updated by others + svmName := storageVersionMigrationPrefix + mco.GetName() + svm := &migrationv1alpha1.StorageVersionMigration{ + ObjectMeta: metav1.ObjectMeta{ + Name: svmName, + }, + Spec: migrationv1alpha1.StorageVersionMigrationSpec{ + Resource: migrationv1alpha1.GroupVersionResource{ + Group: mcov1beta2.GroupVersion.Group, + Resource: mcoconfig.MCORsName, + }, + }, + } + c = fake.NewFakeClient(svm) + err = createOrUpdateObservabilityStorageVersionMigrationResource(c, s, mco) + if err != nil { + t.Fatalf("createOrUpdateObservabilityStorageVersionMigrationResource: (%v)", err) + } + + foundSvm := &migrationv1alpha1.StorageVersionMigration{} + err = c.Get(context.TODO(), types.NamespacedName{Name: svmName}, foundSvm) + if err != nil { + t.Fatalf("Failed to get StorageVersionMigration (%s): (%v)", svmName, err) + } + if foundSvm.Spec.Resource.Version != mcov1beta2.GroupVersion.Version { + t.Fatalf("Failed to update StorageVersionMigration (%s)", svmName) + } + + err = cleanObservabilityStorageVersionMigrationResource(c, mco) + if err != nil { + t.Fatalf("Failed to clean the StorageVersionMigration") + } + + // Test clean scenario in which StorageVersionMigration is already removed + err = createOrUpdateObservabilityStorageVersionMigrationResource(c, s, mco) + if err != nil { + t.Fatalf("Failed to StorageVersionMigration: (%v)", err) + } + + err = c.Delete(context.TODO(), svm) + if 
err != nil { + t.Fatalf("Failed to delete (%s): (%v)", svmName, err) + } + + err = cleanObservabilityStorageVersionMigrationResource(c, mco) + if err != nil { + t.Fatalf("Failed to clean the StorageVersionMigration") + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/customize_img.go b/operators/multiclusterobservability/controllers/placementrule/customize_img.go new file mode 100644 index 000000000..07c6b3823 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/customize_img.go @@ -0,0 +1,30 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "sync" + + "sigs.k8s.io/controller-runtime/pkg/client" + + imageregistryv1alpha1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/imageregistry/v1alpha1" + "github.com/stolostron/multicloud-operators-foundation/pkg/helpers/imageregistry" +) + +var ( + managedClusterImageRegistry = map[string]string{} + managedClusterImageRegistryMutex = &sync.RWMutex{} +) + +func updateManagedClusterImageRegistry(obj client.Object) { + if imageReg, ok := obj.GetLabels()[imageregistryv1alpha1.ClusterImageRegistryLabel]; ok { + managedClusterImageRegistryMutex.Lock() + managedClusterImageRegistry[obj.GetName()] = imageReg + managedClusterImageRegistryMutex.Unlock() + } +} + +func NewImageRegistryClient(c client.Client) imageregistry.Client { + return imageregistry.NewDefaultClient(c) +} diff --git a/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go b/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go new file mode 100644 index 000000000..7ca9710d4 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go @@ -0,0 +1,179 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/kustomize/v3/pkg/resource" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering/templates" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +const ( + deployName = "endpoint-observability-operator" + saName = "endpoint-observability-operator-sa" + rolebindingName = "open-cluster-management:endpoint-observability-operator-rb" +) + +// loadTemplates load manifests from manifests directory +func loadTemplates(mco *mcov1beta2.MultiClusterObservability) ( + []runtime.RawExtension, + *apiextensionsv1.CustomResourceDefinition, + *apiextensionsv1beta1.CustomResourceDefinition, + *appsv1.Deployment, + *corev1.ConfigMap, + error) { + // render endpoint-observability templates + endpointObsTemplates, err := templates.GetOrLoadEndpointObservabilityTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + log.Error(err, "Failed to load templates") + return nil, nil, nil, nil, nil, err + } + crdv1 := &apiextensionsv1.CustomResourceDefinition{} + crdv1beta1 := &apiextensionsv1beta1.CustomResourceDefinition{} + dep := &appsv1.Deployment{} + imageListCM := &corev1.ConfigMap{} + rawExtensionList := []runtime.RawExtension{} + for _, r := range endpointObsTemplates { + obj, err := updateRes(r, mco) + if err != nil { + return nil, nil, nil, nil, nil, err + } + if r.GetKind() == "Deployment" { + dep = obj.(*appsv1.Deployment) + } else if r.GetKind() == "ConfigMap" && r.GetName() == operatorconfig.ImageConfigMap { + imageListCM = obj.(*corev1.ConfigMap) + } else if r.GetKind() == "CustomResourceDefinition" { + if r.GetGvk().Version == "v1" { + crdv1 = obj.(*apiextensionsv1.CustomResourceDefinition) + } else { + crdv1beta1 = obj.(*apiextensionsv1beta1.CustomResourceDefinition) + } + } else { + rawExtensionList = append(rawExtensionList, runtime.RawExtension{Object: obj}) + } + } + return rawExtensionList, crdv1, crdv1beta1, dep, imageListCM, nil +} + +func updateRes(r *resource.Resource, + mco *mcov1beta2.MultiClusterObservability) (runtime.Object, error) { + + kind := r.GetKind() + if kind != "ClusterRole" && kind != "ClusterRoleBinding" && kind != "CustomResourceDefinition" { + r.SetNamespace(spokeNameSpace) + } + obj := util.GetK8sObj(kind) + if kind == "CustomResourceDefinition" && r.GetGvk().Version == "v1beta1" { + obj = &apiextensionsv1beta1.CustomResourceDefinition{} + } + obj.GetObjectKind() + err := runtime.DefaultUnstructuredConverter.FromUnstructured(r.Map(), obj) + if err != nil { + log.Error(err, "failed to convert the resource", "resource", r.GetName()) + return nil, err + } + // set the images and watch_namespace for endpoint metrics operator + if 
r.GetKind() == "Deployment" && r.GetName() == deployName { + spec := obj.(*appsv1.Deployment).Spec.Template.Spec + for i, container := range spec.Containers { + if container.Name == "endpoint-observability-operator" { + spec.Containers[i] = updateEndpointOperator(mco, container) + } + } + } + // set the imagepullsecrets for sa + if r.GetKind() == "ServiceAccount" && r.GetName() == saName { + imageSecrets := obj.(*corev1.ServiceAccount).ImagePullSecrets + for i, imageSecret := range imageSecrets { + if imageSecret.Name == "REPLACE_WITH_IMAGEPULLSECRET" { + imageSecrets[i].Name = mcoconfig.GetImagePullSecret(mco.Spec) + break + } + } + } + // set namespace for rolebinding + if r.GetKind() == "ClusterRoleBinding" && r.GetName() == rolebindingName { + binding := obj.(*rbacv1.ClusterRoleBinding) + binding.Subjects[0].Namespace = spokeNameSpace + } + // set images for components in managed clusters + if r.GetKind() == "ConfigMap" && r.GetName() == operatorconfig.ImageConfigMap { + images := obj.(*corev1.ConfigMap).Data + for key, _ := range images { + if key == operatorconfig.ConfigmapReloaderKey { + found, image := mcoconfig.ReplaceImage( + mco.Annotations, + mcoconfig.ConfigmapReloaderImgRepo+"/"+operatorconfig.ImageKeyNameMap[operatorconfig.ConfigmapReloaderKey], + key) + if found { + obj.(*corev1.ConfigMap).Data[key] = image + } + } else { + found, image := mcoconfig.ReplaceImage( + mco.Annotations, + mcoconfig.DefaultImgRepository+"/"+operatorconfig.ImageKeyNameMap[key], + key) + if found { + obj.(*corev1.ConfigMap).Data[key] = image + } + } + } + } + + return obj, nil +} + +func updateEndpointOperator(mco *mcov1beta2.MultiClusterObservability, + container corev1.Container) corev1.Container { + container.Image = getImage(mco, mcoconfig.EndpointControllerImgName, + mcoconfig.DefaultImgTagSuffix, mcoconfig.EndpointControllerKey) + container.ImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) + for i, env := range container.Env { + if env.Name == operatorconfig.PullSecret { + container.Env[i].Value = mcoconfig.GetImagePullSecret(mco.Spec) + } + } + return container +} + +func getImage(mco *mcov1beta2.MultiClusterObservability, + name, tag, key string) string { + image := mcoconfig.DefaultImgRepository + + "/" + name + ":" + tag + found, replacedImage := mcoconfig.ReplaceImage(mco.Annotations, image, key) + if found { + return replacedImage + } + return image +} + +func loadPromTemplates(mco *mcov1beta2.MultiClusterObservability) ( + []runtime.RawExtension, error) { + // load and render promTemplates + promTemplates, err := templates.GetOrLoadPrometheusTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + log.Error(err, "Failed to load templates") + return nil, err + } + rawExtensionList := []runtime.RawExtension{} + for _, r := range promTemplates { + obj, err := updateRes(r, mco) + if err != nil { + return nil, err + } + rawExtensionList = append(rawExtensionList, runtime.RawExtension{Object: obj}) + } + return rawExtensionList, nil +} diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go new file mode 100644 index 000000000..ea4e9af18 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go @@ -0,0 +1,88 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project
+
+package placementrule
+
+import (
+	"net/url"
+
+	"gopkg.in/yaml.v2"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config"
+	operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config"
+)
+
+// generateHubInfoSecret generates the secret that contains hubInfo.
+// This function should only be called when the watched resources are created/updated.
+func generateHubInfoSecret(client client.Client, obsNamespace string,
+	namespace string, ingressCtlCrdExists bool) (*corev1.Secret, error) {
+
+	obsApiRouteHost := ""
+	alertmanagerEndpoint := ""
+	alertmanagerRouterCA := ""
+
+	if ingressCtlCrdExists {
+		var err error
+		obsApiRouteHost, err = config.GetObsAPIHost(client, obsNamespace)
+		if err != nil {
+			log.Error(err, "Failed to get the host for observatorium API route")
+			return nil, err
+		}
+
+		alertmanagerEndpoint, err = config.GetAlertmanagerEndpoint(client, obsNamespace)
+		if err != nil {
+			log.Error(err, "Failed to get alertmanager endpoint")
+			return nil, err
+		}
+
+		alertmanagerRouterCA, err = config.GetAlertmanagerRouterCA(client)
+		if err != nil {
+			log.Error(err, "Failed to get the CA of the openshift Route")
+			return nil, err
+		}
+	} else {
+		// for KinD support: the managed cluster and the hub cluster are assumed to be in the same cluster,
+		// so the observatorium-api is accessed through the k8s service FQDN + port
+		obsApiRouteHost = config.GetOperandNamePrefix() + "observatorium-api" + "." + config.GetDefaultNamespace() + ".svc.cluster.local:8080"
+		alertmanagerEndpoint = config.AlertmanagerServiceName + "." + config.GetDefaultNamespace() + ".svc.cluster.local:9095"
+		var err error
+		alertmanagerRouterCA, err = config.GetAlertmanagerCA(client)
+		if err != nil {
+			log.Error(err, "Failed to get the CA of the Alertmanager")
+			return nil, err
+		}
+	}
+
+	obsApiURL := url.URL{
+		Host: obsApiRouteHost,
+		Path: operatorconfig.ObservatoriumAPIRemoteWritePath,
+	}
+	if !obsApiURL.IsAbs() {
+		obsApiURL.Scheme = "https"
+	}
+
+	hubInfo := &operatorconfig.HubInfo{
+		ObservatoriumAPIEndpoint: obsApiURL.String(),
+		AlertmanagerEndpoint:     alertmanagerEndpoint,
+		AlertmanagerRouterCA:     alertmanagerRouterCA,
+	}
+	configYaml, err := yaml.Marshal(hubInfo)
+	if err != nil {
+		return nil, err
+	}
+	configYamlMap := map[string][]byte{}
+	configYamlMap[operatorconfig.HubInfoSecretKey] = configYaml
+	return &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: corev1.SchemeGroupVersion.String(),
+			Kind:       "Secret",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      operatorconfig.HubInfoSecretName,
+			Namespace: namespace,
+		},
+		Data: configYamlMap,
+	}, nil
+}
diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go
new file mode 100644
index 000000000..32c472da5
--- /dev/null
+++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2021 Red Hat, Inc.
+// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "strings" + "testing" + + operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" +) + +const ( + routeHost = "test-host" + routerCA = "test-ca" + routerBYOCA = "test-ca" + routerBYOCert = "test-cert" + routerBYOCertKey = "test-key" +) + +func newTestObsApiRoute() *routev1.Route { + return &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "observatorium-api", + Namespace: mcoNamespace, + }, + Spec: routev1.RouteSpec{ + Host: routeHost, + }, + } +} + +func newTestAlertmanagerRoute() *routev1.Route { + return &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerRouteName, + Namespace: mcoNamespace, + }, + Spec: routev1.RouteSpec{ + Host: routeHost, + }, + } +} + +func newTestIngressController() *operatorv1.IngressController { + return &operatorv1.IngressController{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.OpenshiftIngressOperatorCRName, + Namespace: config.OpenshiftIngressOperatorNamespace, + }, + Spec: operatorv1.IngressControllerSpec{ + DefaultCertificate: &corev1.LocalObjectReference{ + Name: "custom-certs-default", + }, + }, + } + +} + +func newTestRouteCASecret() *corev1.Secret { + configYamlMap := map[string][]byte{} + configYamlMap["tls.crt"] = []byte(routerCA) + + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "custom-certs-default", + Namespace: config.OpenshiftIngressNamespace, + }, + Data: configYamlMap, + } +} + +func newTestAmRouteBYOCA() *corev1.Secret { + configYamlMap := map[string][]byte{} + configYamlMap["tls.crt"] = []byte(routerBYOCA) + + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerRouteBYOCAName, + Namespace: mcoNamespace, + }, + Data: configYamlMap, + } +} + +func newTestAmRouteBYOCert() *corev1.Secret { + configYamlMap := map[string][]byte{} + configYamlMap["tls.crt"] = []byte(routerBYOCert) + configYamlMap["tls.key"] = []byte(routerBYOCertKey) + + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerRouteBYOCERTName, + Namespace: mcoNamespace, + }, + Data: configYamlMap, + } +} + +func TestNewSecret(t *testing.T) { + initSchema(t) + + objs := []runtime.Object{newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestIngressController(), newTestRouteCASecret()} + c := fake.NewFakeClient(objs...) 
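+	// ingressCtlCrdExists is passed as true below, so the endpoints are resolved from the fake Route and
+	// IngressController objects above; the resulting secret stores a YAML-encoded operatorconfig.HubInfo
+	// (ObservatoriumAPIEndpoint, AlertmanagerEndpoint, AlertmanagerRouterCA) under operatorconfig.HubInfoSecretKey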
+ + hubInfo, err := generateHubInfoSecret(c, mcoNamespace, namespace, true) + if err != nil { + t.Fatalf("Failed to initial the hub info secret: (%v)", err) + } + hub := &operatorconfig.HubInfo{} + err = yaml.Unmarshal(hubInfo.Data[operatorconfig.HubInfoSecretKey], &hub) + if err != nil { + t.Fatalf("Failed to unmarshal data in hub info secret (%v)", err) + } + if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != routeHost || hub.AlertmanagerRouterCA != routerCA { + t.Fatalf("Wrong content in hub info secret: \ngot: "+hub.ObservatoriumAPIEndpoint+" "+hub.AlertmanagerEndpoint+" "+hub.AlertmanagerRouterCA, clusterName+" "+"https://test-host"+" "+"test-host"+" "+routerCA) + } +} + +func TestNewBYOSecret(t *testing.T) { + initSchema(t) + + objs := []runtime.Object{newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestAmRouteBYOCA(), newTestAmRouteBYOCert()} + c := fake.NewFakeClient(objs...) + + hubInfo, err := generateHubInfoSecret(c, mcoNamespace, namespace, true) + if err != nil { + t.Fatalf("Failed to initial the hub info secret: (%v)", err) + } + hub := &operatorconfig.HubInfo{} + err = yaml.Unmarshal(hubInfo.Data[operatorconfig.HubInfoSecretKey], &hub) + if err != nil { + t.Fatalf("Failed to unmarshal data in hub info secret (%v)", err) + } + if !strings.HasPrefix(hub.ObservatoriumAPIEndpoint, "https://test-host") || hub.AlertmanagerEndpoint != routeHost || hub.AlertmanagerRouterCA != routerBYOCA { + t.Fatalf("Wrong content in hub info secret: \ngot: "+hub.ObservatoriumAPIEndpoint+" "+hub.AlertmanagerEndpoint+" "+hub.AlertmanagerRouterCA, clusterName+" "+"https://test-host"+" "+"test-host"+" "+routerBYOCA) + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/manifestwork.go b/operators/multiclusterobservability/controllers/placementrule/manifestwork.go new file mode 100644 index 000000000..6672ddb4e --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/manifestwork.go @@ -0,0 +1,629 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "gopkg.in/yaml.v2" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + workNameSuffix = "-observability" + localClusterName = "local-cluster" + workPostponeDeleteAnnoKey = "open-cluster-management/postpone-delete" +) + +// intermidiate resources for the manifest work +var ( + hubInfoSecret *corev1.Secret + pullSecret *corev1.Secret + managedClusterObsCert *corev1.Secret + metricsAllowlistConfigMap *corev1.ConfigMap + amAccessorTokenSecret *corev1.Secret + + obsAddonCRDv1 *apiextensionsv1.CustomResourceDefinition + obsAddonCRDv1beta1 *apiextensionsv1beta1.CustomResourceDefinition + endpointMetricsOperatorDeploy *appsv1.Deployment + imageListConfigMap *corev1.ConfigMap + + rawExtensionList []runtime.RawExtension + promRawExtensionList []runtime.RawExtension +) + +type MetricsAllowlist struct { + NameList []string `yaml:"names"` + MatchList []string `yaml:"matches"` + RenameMap map[string]string `yaml:"renames"` + RuleList []Rule `yaml:"rules"` +} + +// Rule is the struct for recording rules and alert rules +type Rule struct { + Record string `yaml:"record"` + Expr string `yaml:"expr"` +} + +func deleteManifestWork(c client.Client, name string, namespace string) error { + + addon := &workv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + err := c.Delete(context.TODO(), addon) + if err != nil && !k8serrors.IsNotFound(err) { + log.Error(err, "Failed to delete manifestworks", "name", name, "namespace", namespace) + return err + } + return nil +} + +func deleteManifestWorks(c client.Client, namespace string) error { + + err := c.DeleteAllOf(context.TODO(), &workv1.ManifestWork{}, + client.InNamespace(namespace), client.MatchingLabels{ownerLabelKey: ownerLabelValue}) + if err != nil { + log.Error(err, "Failed to delete observability manifestworks", "namespace", namespace) + } + return err +} + +func injectIntoWork(works []workv1.Manifest, obj runtime.Object) []workv1.Manifest { + works = append(works, + workv1.Manifest{ + RawExtension: runtime.RawExtension{ + Object: obj, + }, + }) + return works +} + +func newManifestwork(name string, namespace string) *workv1.ManifestWork { + return &workv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: 
namespace,
+			Labels: map[string]string{
+				ownerLabelKey: ownerLabelValue,
+			},
+			Annotations: map[string]string{
+				// Add the postpone delete annotation for manifestwork so that the observabilityaddon can be
+				// cleaned up before the manifestwork is deleted by the managedcluster-import-controller when
+				// the corresponding managedcluster is detached.
+				// Note the annotation value is currently not taking effect, because the
+				// managedcluster-import-controller hard-codes the value to 10m.
+				workPostponeDeleteAnnoKey: "",
+			},
+		},
+		Spec: workv1.ManifestWorkSpec{
+			Workload: workv1.ManifestsTemplate{
+				Manifests: []workv1.Manifest{},
+			},
+		},
+	}
+}
+
+// removePostponeDeleteAnnotationForManifestwork removes the postpone delete annotation for manifestwork so that
+// the workagent can delete the manifestwork normally
+func removePostponeDeleteAnnotationForManifestwork(c client.Client, namespace string) error {
+	name := namespace + workNameSuffix
+	found := &workv1.ManifestWork{}
+	err := c.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, found)
+	if err != nil {
+		log.Error(err, "failed to check manifestwork", "namespace", namespace, "name", name)
+		return err
+	}
+
+	if found.GetAnnotations() != nil {
+		delete(found.GetAnnotations(), workPostponeDeleteAnnoKey)
+	}
+
+	err = c.Update(context.TODO(), found)
+	if err != nil {
+		log.Error(err, "failed to update manifestwork", "namespace", namespace, "name", name)
+		return err
+	}
+
+	return nil
+}
+
+func createManifestwork(c client.Client, work *workv1.ManifestWork) error {
+	name := work.ObjectMeta.Name
+	namespace := work.ObjectMeta.Namespace
+	found := &workv1.ManifestWork{}
+	err := c.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, found)
+	if err != nil && k8serrors.IsNotFound(err) {
+		log.Info("Creating manifestwork", "namespace", namespace, "name", name)
+
+		err = c.Create(context.TODO(), work)
+		if err != nil {
+			log.Error(err, "Failed to create manifestwork", "namespace", namespace, "name", name)
+			return err
+		}
+		return nil
+	} else if err != nil {
+		log.Error(err, "Failed to check manifestwork", "namespace", namespace, "name", name)
+		return err
+	}
+
+	if found.GetDeletionTimestamp() != nil {
+		log.Info("Existing manifestwork is terminating, skip and reconcile later")
+		return errors.New("Existing manifestwork is terminating, skip and reconcile later")
+	}
+
+	manifests := work.Spec.Workload.Manifests
+	updated := false
+	if len(found.Spec.Workload.Manifests) == len(manifests) {
+		for i, m := range found.Spec.Workload.Manifests {
+			if !util.CompareObject(m.RawExtension, manifests[i].RawExtension) {
+				updated = true
+				break
+			}
+		}
+	} else {
+		updated = true
+	}
+
+	if updated {
+		log.Info("Updating manifestwork", "namespace", namespace, "name", name)
+		found.Spec.Workload.Manifests = manifests
+		err = c.Update(context.TODO(), found)
+		if err != nil {
+			log.Error(err, "Failed to update manifestwork", "namespace", namespace, "name", name)
+			return err
+		}
+		return nil
+	}
+
+	log.Info("manifestwork already existed/unchanged", "namespace", namespace)
+	return nil
+}
+
+// generateGlobalManifestResources generates global resources, e.g. manifestwork,
+// endpoint-metrics-operator deploy and hubInfo Secret.
+// This function is expensive and should not be called for each reconcile loop.
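+// The shared resources (pull secret, server CA cert, metrics allowlist, alertmanager accessor token and
+// the rendered templates) are cached in the package-level variables above and reused until they are reset.
+// A typical call sequence looks roughly like the following sketch (cluster-specific values such as
+// clusterNamespace, clusterName, restMapper, ingressCtlCrdExists and installProm are placeholders;
+// error handling is elided):
+//
+//	works, crdV1Work, _, err := generateGlobalManifestResources(c, mco)
+//	hubInfoSecret, err := generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists)
+//	// then, for every managed cluster namespace:
+//	err = createManifestWorks(c, restMapper, clusterNamespace, clusterName, mco, works, crdV1Work, endpointMetricsOperatorDeploy, hubInfoSecret, installProm)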
+func generateGlobalManifestResources(c client.Client, mco *mcov1beta2.MultiClusterObservability) ( + []workv1.Manifest, *workv1.Manifest, *workv1.Manifest, error) { + + works := []workv1.Manifest{} + + // inject the namespace + works = injectIntoWork(works, generateNamespace()) + + // inject the image pull secret + if pullSecret == nil { + var err error + if pullSecret, err = generatePullSecret(c, config.GetImagePullSecret(mco.Spec)); err != nil { + return nil, nil, nil, err + } + } + + // inject the certificates + if managedClusterObsCert == nil { + var err error + if managedClusterObsCert, err = generateObservabilityServerCACerts(c); err != nil { + return nil, nil, nil, err + } + } + works = injectIntoWork(works, managedClusterObsCert) + + // inject the metrics allowlist configmap + if metricsAllowlistConfigMap == nil { + var err error + if metricsAllowlistConfigMap, err = generateMetricsListCM(c); err != nil { + return nil, nil, nil, err + } + } + works = injectIntoWork(works, metricsAllowlistConfigMap) + + // inject the alertmanager accessor bearer token secret + if amAccessorTokenSecret == nil { + var err error + if amAccessorTokenSecret, err = generateAmAccessorTokenSecret(c); err != nil { + return nil, nil, nil, err + } + } + works = injectIntoWork(works, amAccessorTokenSecret) + + // reload resources if empty + if len(rawExtensionList) == 0 || obsAddonCRDv1 == nil || obsAddonCRDv1beta1 == nil { + var err error + rawExtensionList, obsAddonCRDv1, obsAddonCRDv1beta1, + endpointMetricsOperatorDeploy, imageListConfigMap, err = loadTemplates(mco) + if err != nil { + return nil, nil, nil, err + } + } + // inject resouces in templates + crdv1Work := &workv1.Manifest{RawExtension: runtime.RawExtension{ + Object: obsAddonCRDv1, + }} + crdv1beta1Work := &workv1.Manifest{RawExtension: runtime.RawExtension{ + Object: obsAddonCRDv1beta1, + }} + for _, raw := range rawExtensionList { + works = append(works, workv1.Manifest{RawExtension: raw}) + } + + return works, crdv1Work, crdv1beta1Work, nil +} + +func createManifestWorks(c client.Client, restMapper meta.RESTMapper, + clusterNamespace string, clusterName string, + mco *mcov1beta2.MultiClusterObservability, + works []workv1.Manifest, crdWork *workv1.Manifest, dep *appsv1.Deployment, + hubInfo *corev1.Secret, installProm bool) error { + + work := newManifestwork(clusterNamespace+workNameSuffix, clusterNamespace) + + manifests := work.Spec.Workload.Manifests + // inject observabilityAddon + obaddon, err := getObservabilityAddon(c, clusterNamespace, mco) + if err != nil { + return err + } + if obaddon != nil { + manifests = injectIntoWork(manifests, obaddon) + } + + manifests = append(manifests, works...) 
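+	// the ObservabilityAddon CRD manifest below is shipped only to clusters other than local-cluster (the hub)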
+ + if clusterName != localClusterName { + manifests = append(manifests, *crdWork) + } + + // replace the managedcluster image with the custom registry + managedClusterImageRegistryMutex.RLock() + _, hasCustomRegistry := managedClusterImageRegistry[clusterName] + managedClusterImageRegistryMutex.RUnlock() + imageRegistryClient := NewImageRegistryClient(c) + + // inject the endpoint operator deployment + spec := dep.Spec.Template.Spec + if clusterName == localClusterName { + spec.NodeSelector = mco.Spec.NodeSelector + spec.Tolerations = mco.Spec.Tolerations + } + for i, container := range spec.Containers { + if container.Name == "endpoint-observability-operator" { + for j, env := range container.Env { + if env.Name == "HUB_NAMESPACE" { + container.Env[j].Value = clusterNamespace + } + if env.Name == operatorconfig.InstallPrometheus { + container.Env[j].Value = strconv.FormatBool(installProm) + } + } + + if hasCustomRegistry { + oldImage := container.Image + newImage, err := imageRegistryClient.Cluster(clusterName).ImageOverride(oldImage) + log.Info("Replace the endpoint operator image", "cluster", clusterName, "newImage", newImage) + if err == nil { + spec.Containers[i].Image = newImage + } + } + } + } + dep.Spec.Template.Spec = spec + manifests = injectIntoWork(manifests, dep) + // replace the pull secret and addon components image + if hasCustomRegistry { + log.Info("Replace the default pull secret to custom pull secret", "cluster", clusterName) + customPullSecret, err := imageRegistryClient.Cluster(clusterName).PullSecret() + if err == nil && customPullSecret != nil { + customPullSecret.ResourceVersion = "" + customPullSecret.Name = config.GetImagePullSecret(mco.Spec) + customPullSecret.Namespace = spokeNameSpace + manifests = injectIntoWork(manifests, customPullSecret) + } + + log.Info("Replace the image list configmap with custom image", "cluster", clusterName) + newImageListCM := imageListConfigMap.DeepCopy() + images := newImageListCM.Data + for key, oldImage := range images { + newImage, err := imageRegistryClient.Cluster(clusterName).ImageOverride(oldImage) + if err == nil { + newImageListCM.Data[key] = newImage + } + } + manifests = injectIntoWork(manifests, newImageListCM) + } + + if pullSecret != nil && !hasCustomRegistry { + manifests = injectIntoWork(manifests, pullSecret) + } + + if !hasCustomRegistry { + manifests = injectIntoWork(manifests, imageListConfigMap) + } + + // inject the hub info secret + hubInfo.Data[operatorconfig.ClusterNameKey] = []byte(clusterName) + manifests = injectIntoWork(manifests, hubInfo) + + work.Spec.Workload.Manifests = manifests + + err = createManifestwork(c, work) + return err +} + +// generateAmAccessorTokenSecret generates the secret that contains the access_token for the Alertmanager in the Hub cluster +func generateAmAccessorTokenSecret(client client.Client) (*corev1.Secret, error) { + amAccessorSA := &corev1.ServiceAccount{} + err := client.Get(context.TODO(), types.NamespacedName{Name: config.AlertmanagerAccessorSAName, + Namespace: config.GetDefaultNamespace()}, amAccessorSA) + if err != nil { + log.Error(err, "Failed to get Alertmanager accessor serviceaccount", "name", config.AlertmanagerAccessorSAName) + return nil, err + } + + tokenSrtName := "" + for _, secretRef := range amAccessorSA.Secrets { + if strings.HasPrefix(secretRef.Name, config.AlertmanagerAccessorSAName+"-token") { + tokenSrtName = secretRef.Name + break + } + } + + if tokenSrtName == "" { + log.Error(err, "no token secret for Alertmanager accessor serviceaccount", 
"name", config.AlertmanagerAccessorSAName) + return nil, fmt.Errorf("no token secret for Alertmanager accessor serviceaccount: %s", config.AlertmanagerAccessorSAName) + } + + tokenSrt := &corev1.Secret{} + err = client.Get(context.TODO(), types.NamespacedName{Name: tokenSrtName, + Namespace: config.GetDefaultNamespace()}, tokenSrt) + if err != nil { + log.Error(err, "Failed to get token secret for Alertmanager accessor serviceaccount", "name", tokenSrtName) + return nil, err + } + + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerAccessorSecretName, + Namespace: spokeNameSpace, + }, + Data: map[string][]byte{ + "token": tokenSrt.Data["token"], + }, + }, nil +} + +// generatePullSecret generates the image pull secret for mco +func generatePullSecret(c client.Client, name string) (*corev1.Secret, error) { + imagePullSecret := &corev1.Secret{} + err := c.Get(context.TODO(), + types.NamespacedName{ + Name: name, + Namespace: config.GetDefaultNamespace(), + }, imagePullSecret) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, nil + } else { + log.Error(err, "Failed to get the pull secret", "name", name) + return nil, err + } + } + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: imagePullSecret.Name, + Namespace: spokeNameSpace, + }, + Data: map[string][]byte{ + ".dockerconfigjson": imagePullSecret.Data[".dockerconfigjson"], + }, + Type: corev1.SecretTypeDockerConfigJson, + }, nil +} + +// generateObservabilityServerCACerts generates the certificate for managed cluster +func generateObservabilityServerCACerts(client client.Client) (*corev1.Secret, error) { + ca := &corev1.Secret{} + err := client.Get(context.TODO(), types.NamespacedName{Name: config.ServerCACerts, + Namespace: config.GetDefaultNamespace()}, ca) + if err != nil { + return nil, err + } + + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterObsCertName, + Namespace: spokeNameSpace, + }, + Data: map[string][]byte{ + "ca.crt": ca.Data["tls.crt"], + }, + }, nil +} + +// generateMetricsListCM generates the configmap that contains the metrics allowlist +func generateMetricsListCM(client client.Client) (*corev1.ConfigMap, error) { + metricsAllowlist := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: operatorconfig.AllowlistConfigMapName, + Namespace: spokeNameSpace, + }, + Data: map[string]string{}, + } + + allowlist, err := getAllowList(client, operatorconfig.AllowlistConfigMapName) + if err != nil { + log.Error(err, "Failed to get metrics allowlist configmap "+operatorconfig.AllowlistConfigMapName) + return nil, err + } + + customAllowlist, err := getAllowList(client, config.AllowlistCustomConfigMapName) + if err == nil { + allowlist.NameList = mergeMetrics(allowlist.NameList, customAllowlist.NameList) + allowlist.MatchList = mergeMetrics(allowlist.MatchList, customAllowlist.MatchList) + allowlist.RuleList = append(allowlist.RuleList, customAllowlist.RuleList...) 
+ for k, v := range customAllowlist.RenameMap { + allowlist.RenameMap[k] = v + } + } else { + log.Info("There is no custom metrics allowlist configmap in the cluster") + } + + data, err := yaml.Marshal(allowlist) + if err != nil { + log.Error(err, "Failed to marshal allowlist data") + return nil, err + } + metricsAllowlist.Data["metrics_list.yaml"] = string(data) + return metricsAllowlist, nil +} + +func getAllowList(client client.Client, name string) (*MetricsAllowlist, error) { + found := &corev1.ConfigMap{} + namespacedName := types.NamespacedName{ + Name: name, + Namespace: config.GetDefaultNamespace(), + } + err := client.Get(context.TODO(), namespacedName, found) + if err != nil { + return nil, err + } + allowlist := &MetricsAllowlist{} + err = yaml.Unmarshal([]byte(found.Data["metrics_list.yaml"]), allowlist) + if err != nil { + log.Error(err, "Failed to unmarshal data in configmap "+name) + return nil, err + } + return allowlist, nil +} + +func mergeMetrics(defaultAllowlist []string, customAllowlist []string) []string { + customMetrics := []string{} + deletedMetrics := map[string]bool{} + for _, name := range customAllowlist { + if !strings.HasPrefix(name, "-") { + customMetrics = append(customMetrics, name) + } else { + deletedMetrics[strings.TrimPrefix(name, "-")] = true + } + } + + metricsRecorder := map[string]bool{} + mergedMetrics := []string{} + defaultAllowlist = append(defaultAllowlist, customMetrics...) + for _, name := range defaultAllowlist { + if metricsRecorder[name] { + continue + } + + if !deletedMetrics[name] { + mergedMetrics = append(mergedMetrics, name) + metricsRecorder[name] = true + } + } + + return mergedMetrics +} + +func getObservabilityAddon(c client.Client, namespace string, + mco *mcov1beta2.MultiClusterObservability) (*mcov1beta1.ObservabilityAddon, error) { + found := &mcov1beta1.ObservabilityAddon{} + namespacedName := types.NamespacedName{ + Name: obsAddonName, + Namespace: namespace, + } + err := c.Get(context.TODO(), namespacedName, found) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, nil + } + log.Error(err, "Failed to check observabilityAddon") + return nil, err + } + if found.ObjectMeta.DeletionTimestamp != nil { + return nil, nil + } + + return &mcov1beta1.ObservabilityAddon{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "observability.open-cluster-management.io/v1beta1", + Kind: "ObservabilityAddon", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: obsAddonName, + Namespace: spokeNameSpace, + }, + Spec: mcoshared.ObservabilityAddonSpec{ + EnableMetrics: mco.Spec.ObservabilityAddonSpec.EnableMetrics, + Interval: mco.Spec.ObservabilityAddonSpec.Interval, + Resources: config.GetOBAResources(mco.Spec.ObservabilityAddonSpec), + }, + }, nil +} + +func removeObservabilityAddon(client client.Client, namespace string) error { + name := namespace + workNameSuffix + found := &workv1.ManifestWork{} + err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, found) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + log.Error(err, "Failed to check manifestwork", "namespace", namespace, "name", name) + return err + } + + obj, err := util.GetObject(found.Spec.Workload.Manifests[0].RawExtension) + if err != nil { + return err + } + if obj.GetObjectKind().GroupVersionKind().Kind == "ObservabilityAddon" { + updateManifests := found.Spec.Workload.Manifests[1:] + found.Spec.Workload.Manifests = updateManifests + + err = client.Update(context.TODO(), found) + if err != nil { + log.Error(err, "Failed 
to update manifestwork", "namespace", namespace, "name", name) + return err + } + } + return nil +} diff --git a/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go b/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go new file mode 100644 index 000000000..9dc957b80 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go @@ -0,0 +1,425 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "encoding/base64" + "os" + "path" + "reflect" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/stolostron/multicloud-operators-foundation/pkg/apis/imageregistry/v1alpha1" + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + pullSecretName = "test-pull-secret" + workSize = 13 +) + +func newTestMCO() *mcov1beta2.MultiClusterObservability { + return &mcov1beta2.MultiClusterObservability{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoName, + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + ImagePullSecret: pullSecretName, + ObservabilityAddonSpec: &mcoshared.ObservabilityAddonSpec{ + EnableMetrics: true, + Interval: 1, + }, + }, + } +} + +func newTestPullSecret() *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pullSecretName, + Namespace: mcoNamespace, + }, + Data: map[string][]byte{ + ".dockerconfigjson": []byte("test-docker-config"), + }, + } +} + +func newCASecret() *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.ServerCACerts, + Namespace: mcoNamespace, + }, + Data: map[string][]byte{ + "ca.crt": []byte("test-ca-crt"), + }, + } +} + +func newCertSecret(namespaces ...string) *corev1.Secret { + ns := namespace + if len(namespaces) != 0 { + ns = namespaces[0] + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: managedClusterObsCertName, + Namespace: ns, + }, + Data: map[string][]byte{ + "tls.crt": []byte("test-tls-crt"), + "tls.key": []byte("test-tls-key"), + }, + } +} + +func NewMetricsAllowListCM() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorconfig.AllowlistConfigMapName, + Namespace: mcoNamespace, + }, + Data: map[string]string{"metrics_list.yaml": ` + names: + - a + - b + renames: + a: c + rules: + - record: f + expr: g +`}, + } +} + +func NewMetricsCustomAllowListCM() *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AllowlistCustomConfigMapName, + Namespace: mcoNamespace, + }, + Data: map[string]string{"metrics_list.yaml": ` + names: + - c + - d + renames: + d: e + rules: + - record: h + expr: i +`}, + } +} + +func 
NewAmAccessorSA() *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerAccessorSAName, + Namespace: mcoNamespace, + }, + Secrets: []corev1.ObjectReference{ + {Name: config.AlertmanagerAccessorSecretName + "-token-xxx"}, + }, + } +} + +func NewAmAccessorTokenSecret() *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertmanagerAccessorSecretName + "-token-xxx", + Namespace: mcoNamespace, + }, + Data: map[string][]byte{ + "token": []byte("xxxxx"), + }, + } +} + +func newCluster(name string, labels map[string]string) *clusterv1.ManagedCluster { + return &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + } +} + +func newPullSecret(name, namespace string, data []byte) *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: data, + }, + StringData: nil, + Type: corev1.SecretTypeDockerConfigJson, + } +} + +func newImageRegistry(name, namespace, registryAddress, pullSecret string) *v1alpha1.ManagedClusterImageRegistry { + return &v1alpha1.ManagedClusterImageRegistry{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: v1alpha1.ImageRegistrySpec{ + Registry: registryAddress, + PullSecret: corev1.LocalObjectReference{Name: pullSecret}, + PlacementRef: v1alpha1.PlacementRef{}, + }, + } +} + +func TestManifestWork(t *testing.T) { + initSchema(t) + objs := []runtime.Object{ + newTestObsApiRoute(), + newTestAlertmanagerRoute(), + newTestIngressController(), + newTestRouteCASecret(), + newCASecret(), + newCertSecret(mcoNamespace), + NewMetricsAllowListCM(), + NewMetricsCustomAllowListCM(), + NewAmAccessorSA(), + NewAmAccessorTokenSecret(), + newCluster(clusterName, map[string]string{v1alpha1.ClusterImageRegistryLabel: namespace + ".image_registry"}), + newImageRegistry("image_registry", namespace, "registry_server", "custorm_pull_secret"), + newPullSecret("custorm_pull_secret", namespace, []byte("custorm")), + } + c := fake.NewFakeClient(objs...) 
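+	// point TEMPLATES_PATH at a symlinked copy of the operator manifests so that loadTemplates
+	// (called through generateGlobalManifestResources) can render the endpoint-observability templates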
+ wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get work dir: (%v)", err) + } + os.MkdirAll(path.Join(wd, "../../tests"), 0755) + testManifestsPath := path.Join(wd, "../../tests/manifests") + manifestsPath := path.Join(wd, "../../manifests") + os.Setenv("TEMPLATES_PATH", testManifestsPath) + err = os.Symlink(manifestsPath, testManifestsPath) + if err != nil { + t.Fatalf("Failed to create symbollink(%s) to(%s) for the test manifests: (%v)", testManifestsPath, manifestsPath, err) + } + works, crdWork, _, err := generateGlobalManifestResources(c, newTestMCO()) + if err != nil { + t.Fatalf("Failed to get global manifestwork resourc: (%v)", err) + } + t.Logf("work size is %d", len(works)) + if hubInfoSecret, err = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, true); err != nil { + t.Fatalf("Failed to generate hubInfo secret: (%v)", err) + } + err = createManifestWorks(c, nil, namespace, clusterName, newTestMCO(), works, crdWork, endpointMetricsOperatorDeploy, hubInfoSecret, false) + if err != nil { + t.Fatalf("Failed to create manifestworks: (%v)", err) + } + found := &workv1.ManifestWork{} + workName := namespace + workNameSuffix + err = c.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get manifestwork %s: (%v)", workName, err) + } + if len(found.Spec.Workload.Manifests) != workSize-1 { + t.Fatalf("Wrong size of manifests in the mainfestwork %s: %d", workName, len(found.Spec.Workload.Manifests)) + } + + err = c.Create(context.TODO(), newTestPullSecret()) + if err != nil { + t.Fatalf("Failed to create pull secret: (%v)", err) + } + // reset image pull secret + pullSecret = nil + works, crdWork, _, err = generateGlobalManifestResources(c, newTestMCO()) + if err != nil { + t.Fatalf("Failed to get global manifestwork resourc: (%v)", err) + } + err = createManifestWorks(c, nil, namespace, clusterName, newTestMCO(), works, crdWork, endpointMetricsOperatorDeploy, hubInfoSecret, false) + if err != nil { + t.Fatalf("Failed to create manifestworks: (%v)", err) + } + err = c.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get manifestwork %s: (%v)", workName, err) + } + if len(found.Spec.Workload.Manifests) != workSize { + t.Fatalf("Wrong size of manifests in the mainfestwork %s: %d", workName, len(found.Spec.Workload.Manifests)) + } + + spokeNameSpace = "spoke-ns" + err = createManifestWorks(c, nil, namespace, clusterName, newTestMCO(), works, crdWork, endpointMetricsOperatorDeploy, hubInfoSecret, false) + if err != nil { + t.Fatalf("Failed to create manifestworks with updated namespace: (%v)", err) + } + + err = deleteManifestWorks(c, namespace) + if err != nil { + t.Fatalf("Failed to delete manifestworks: (%v)", err) + } + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace + workNameSuffix, Namespace: namespace}, found) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("Manifestwork not deleted: (%v)", err) + } + + // set the default pull secret + pullSecret = newPullSecret("multiclusterhub-operator-pull-secret", namespace, []byte("default")) + // config the managedcluster to use the custom registry + managedClusterImageRegistryMutex.Lock() + managedClusterImageRegistry[clusterName] = "open-cluster-management.io/image-registry=" + namespace + ".image_registry" + managedClusterImageRegistryMutex.Unlock() + + works, crdWork, _, err = generateGlobalManifestResources(c, newTestMCO()) + if 
err != nil { + t.Fatalf("Failed to get global manifestwork resourc: (%v)", err) + } + + if hubInfoSecret, err = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, true); err != nil { + t.Fatalf("Failed to generate hubInfo secret: (%v)", err) + } + + err = createManifestWorks(c, nil, namespace, clusterName, newTestMCO(), works, crdWork, endpointMetricsOperatorDeploy, hubInfoSecret, false) + if err != nil { + t.Fatalf("Failed to create manifestworks: (%v)", err) + } + found = &workv1.ManifestWork{} + workName = namespace + workNameSuffix + err = c.Get(context.TODO(), types.NamespacedName{Name: workName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get manifestwork %s: (%v)", workName, err) + } + + // To check pullsecret, endpoint-observability-operator and image list configmap + for _, manifest := range found.Spec.Workload.Manifests { + obj := &unstructured.Unstructured{} + obj.UnmarshalJSON(manifest.Raw) + if obj.GetKind() == "Secret" && obj.GetName() == "multiclusterhub-operator-pull-secret" { + if !strings.Contains(string(manifest.Raw), base64.StdEncoding.EncodeToString([]byte("custorm"))) { + t.Errorf("multiclusterhub-operator-pull-secret should use the custom pull secret") + } + } + + if obj.GetKind() == "ConfigMap" && obj.GetName() == "images-list" { + if !strings.Contains(string(manifest.Raw), "registry_server") { + t.Errorf("images-list should use the custom registry image") + } + } + + if obj.GetKind() == "Deployment" && obj.GetName() == "endpoint-observability-operator" { + if !strings.Contains(string(manifest.Raw), "registry_server") { + t.Errorf("endpoint-observability-operator should use the custom registry image") + } + } + } + + if err = os.Remove(testManifestsPath); err != nil { + t.Fatalf("Failed to delete symbollink(%s) for the test manifests: (%v)", testManifestsPath, err) + } + os.Remove(path.Join(wd, "../../tests")) +} + +func TestMergeMetrics(t *testing.T) { + testCaseList := []struct { + name string + defaultAllowlist []string + customAllowlist []string + want []string + }{ + { + name: "no deleted metrics", + defaultAllowlist: []string{"a", "b"}, + customAllowlist: []string{"c"}, + want: []string{"a", "b", "c"}, + }, + + { + name: "no default metrics", + defaultAllowlist: []string{}, + customAllowlist: []string{"a"}, + want: []string{"a"}, + }, + + { + name: "no metrics", + defaultAllowlist: []string{}, + customAllowlist: []string{}, + want: []string{}, + }, + + { + name: "have deleted metrics", + defaultAllowlist: []string{"a", "b"}, + customAllowlist: []string{"c", "-b"}, + want: []string{"a", "c"}, + }, + + { + name: "have deleted matches", + defaultAllowlist: []string{"__name__=\"a\",job=\"a\"", "__name__=\"b\",job=\"b\""}, + customAllowlist: []string{"-__name__=\"b\",job=\"b\"", "__name__=\"c\",job=\"c\""}, + want: []string{"__name__=\"a\",job=\"a\"", "__name__=\"c\",job=\"c\""}, + }, + + { + name: "deleted metrics is no exist", + defaultAllowlist: []string{"a", "b"}, + customAllowlist: []string{"c", "-d"}, + want: []string{"a", "b", "c"}, + }, + + { + name: "deleted all metrics", + defaultAllowlist: []string{"a", "b"}, + customAllowlist: []string{"-a", "-b"}, + want: []string{}, + }, + + { + name: "delete custorm metrics", + defaultAllowlist: []string{"a", "b"}, + customAllowlist: []string{"a", "-a"}, + want: []string{"b"}, + }, + + { + name: "have repeated default metrics", + defaultAllowlist: []string{"a", "a"}, + customAllowlist: []string{"a", "-b"}, + want: []string{"a"}, + }, + + { + name: "have repeated custom 
metrics", + defaultAllowlist: []string{"a"}, + customAllowlist: []string{"b", "b", "-a"}, + want: []string{"b"}, + }, + } + + for _, c := range testCaseList { + got := mergeMetrics(c.defaultAllowlist, c.customAllowlist) + if !reflect.DeepEqual(got, c.want) { + t.Errorf("%v: mergeMetrics() = %v, want %v", c.name, got, c.want) + } + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/namespace.go b/operators/multiclusterobservability/controllers/placementrule/namespace.go new file mode 100644 index 000000000..59b92eb5b --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/namespace.go @@ -0,0 +1,27 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "os" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + spokeNameSpace = os.Getenv("SPOKE_NAMESPACE") +) + +func generateNamespace() *corev1.Namespace { + return &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: spokeNameSpace, + }, + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/namespace_test.go b/operators/multiclusterobservability/controllers/placementrule/namespace_test.go new file mode 100644 index 000000000..1fed7fc72 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/namespace_test.go @@ -0,0 +1,20 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "testing" +) + +const ( + name = "test-ns" +) + +func TestGenerateNamespace(t *testing.T) { + spokeNameSpace = name + namespace := generateNamespace() + if namespace.Name != name { + t.Fatal("Wrong namespace created") + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/obsaddon.go b/operators/multiclusterobservability/controllers/placementrule/obsaddon.go new file mode 100644 index 000000000..48035ef03 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/obsaddon.go @@ -0,0 +1,136 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + obsv1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +const ( + obsAddonName = "observability-addon" + obsAddonFinalizer = "observability.open-cluster-management.io/addon-cleanup" +) + +func deleteObsAddon(c client.Client, namespace string) error { + found := &obsv1beta1.ObservabilityAddon{} + err := c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, found) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + log.Error(err, "Failed to check observabilityaddon cr before delete", "namespace", namespace) + return err + } + + err = c.Delete(context.TODO(), found) + if err != nil { + log.Error(err, "Failed to delete observabilityaddon", "namespace", namespace) + } + + err = removeObservabilityAddon(c, namespace) + if err != nil { + return err + } + + // forcely remove observabilityaddon if it's already stuck in Terminating more than 5 minutes + time.AfterFunc(time.Duration(5)*time.Minute, func() { + err := deleteStaleObsAddon(c, namespace, false) + if err != nil { + log.Error(err, "Failed to forcely remove observabilityaddon", "namespace", namespace) + } + }) + + log.Info("observabilityaddon is deleted", "namespace", namespace) + return nil +} + +func createObsAddon(c client.Client, namespace string) error { + ec := &obsv1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: obsAddonName, + Namespace: namespace, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + } + found := &obsv1beta1.ObservabilityAddon{} + err := c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, found) + if err != nil && errors.IsNotFound(err) || err == nil && found.GetDeletionTimestamp() != nil { + if err == nil { + err = deleteFinalizer(c, found) + if err != nil { + return err + } + } + log.Info("Creating observabilityaddon cr", "namespace", namespace) + err = c.Create(context.TODO(), ec) + if err != nil { + log.Error(err, "Failed to create observabilityaddon cr") + return err + } + return nil + } else if err != nil { + log.Error(err, "Failed to check observabilityaddon cr before create") + return err + } + + log.Info("observabilityaddon already existed/unchanged", "namespace", namespace) + return nil +} + +func deleteStaleObsAddon(c client.Client, namespace string, isForce bool) error { + found := &obsv1beta1.ObservabilityAddon{} + err := c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, found) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + log.Error(err, "Failed to check observabilityaddon cr before delete stale ones", "namespace", namespace) + return err + } + if found.GetDeletionTimestamp() == nil && !isForce { + log.Info("observabilityaddon is not in Terminating status, skip", "namespace", namespace) + return nil + } + err = deleteFinalizer(c, found) + if err != nil { + return err + } + obsaddon := &obsv1beta1.ObservabilityAddon{ + ObjectMeta: metav1.ObjectMeta{ + Name: obsAddonName, + Namespace: namespace, + }, + } + err = c.Delete(context.TODO(), obsaddon) + if err != nil && !errors.IsNotFound(err) { 
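+		// NotFound is excluded above because the addon may already be gone; anything else is a real delete failure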
+ log.Error(err, "Failed to delete observabilityaddon", "namespace", namespace) + return err + } + log.Info("observabilityaddon is deleted thoroughly", "namespace", namespace) + return nil +} + +func deleteFinalizer(c client.Client, obsaddon *obsv1beta1.ObservabilityAddon) error { + if util.Contains(obsaddon.GetFinalizers(), obsAddonFinalizer) { + obsaddon.SetFinalizers(util.Remove(obsaddon.GetFinalizers(), obsAddonFinalizer)) + err := c.Update(context.TODO(), obsaddon) + if err != nil { + log.Error(err, "Failed to delete finalizer in observabilityaddon", "namespace", obsaddon.Namespace) + return err + } + log.Info("observabilityaddon's finalizer is deleted", "namespace", obsaddon.Namespace) + } + return nil +} diff --git a/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go b/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go new file mode 100644 index 000000000..3d5ce32cb --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go @@ -0,0 +1,105 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "testing" + "time" + + mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestObsAddonCR(t *testing.T) { + initSchema(t) + + objs := []runtime.Object{newTestObsApiRoute()} + c := fake.NewFakeClient(objs...) + + err := createObsAddon(c, namespace) + if err != nil { + t.Fatalf("Failed to create observabilityaddon: (%v)", err) + } + found := &mcov1beta1.ObservabilityAddon{} + err = c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get observabilityaddon: (%v)", err) + } + + err = createObsAddon(c, namespace) + if err != nil { + t.Fatalf("Failed to create observabilityaddon: (%v)", err) + } + + testWork := newManifestwork(namespace+workNameSuffix, namespace) + testManifests := testWork.Spec.Workload.Manifests + testObservabilityAddon := &mcov1beta1.ObservabilityAddon{} + err = c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, testObservabilityAddon) + if err != nil { + t.Fatalf("Failed to get observabilityaddon: (%v)", err) + } + // inject the testing observabilityAddon + if testObservabilityAddon != nil { + testManifests = injectIntoWork(testManifests, testObservabilityAddon) + } + testWork.Spec.Workload.Manifests = testManifests + + err = c.Create(context.TODO(), testWork) + if err != nil { + t.Fatalf("Failed to create manifestwork: (%v)", err) + } + + err = deleteObsAddon(c, namespace) + if err != nil { + t.Fatalf("Failed to delete observabilityaddon: (%v)", err) + } + err = c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, found) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("Failed to delete observabilityaddon: (%v)", err) + } + + err = deleteObsAddon(c, namespace) + if err != nil { + t.Fatalf("Failed to delete observabilityaddon: (%v)", err) + } + + err = deleteManifestWork(c, namespace+workNameSuffix, namespace) + if err != nil { + t.Fatalf("Failed to delete manifestwork: (%v)", err) + } +} + +func TestStaleObsAddonCR(t *testing.T) { + initSchema(t) + + objs 
:= []runtime.Object{newTestObsApiRoute()} + c := fake.NewFakeClient(objs...) + + err := createObsAddon(c, namespace) + if err != nil { + t.Fatalf("Failed to create observabilityaddon: (%v)", err) + } + found := &mcov1beta1.ObservabilityAddon{} + err = c.Get(context.TODO(), types.NamespacedName{Name: obsAddonName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get observabilityaddon: (%v)", err) + } + + found.ObjectMeta.DeletionTimestamp = &v1.Time{Time: time.Now()} + found.SetFinalizers([]string{obsAddonFinalizer}) + err = c.Update(context.TODO(), found) + if err != nil { + t.Fatalf("Failed to update observabilityaddon: (%v)", err) + } + + err = deleteStaleObsAddon(c, namespace, true) + if err != nil { + t.Fatalf("Failed to remove stale observabilityaddon: (%v)", err) + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go new file mode 100644 index 000000000..28939a0ba --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go @@ -0,0 +1,865 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "errors" + "reflect" + "sync" + "time" + + "github.com/go-logr/logr" + operatorv1 "github.com/openshift/api/operator/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" + commonutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" + mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + ownerLabelKey = "owner" + ownerLabelValue = "multicluster-observability-operator" + managedClusterObsCertName = "observability-managed-cluster-certs" + nonOCP = "N/A" +) + +var ( + log = logf.Log.WithName("controller_placementrule") + watchNamespace = config.GetDefaultNamespace() + isCRoleCreated = false + isClusterManagementAddonCreated = false + isplacementControllerRunnning = false + managedClusterList = map[string]string{} + 
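+	// managedClusterList maps a managed cluster name to the value of its
+	// "openshiftVersion" label ("3", "4", ...) or nonOCP, and is shared between
+	// the reconcile loop and the ManagedCluster watch callbacks, so every access
+	// is guarded by managedClusterListMutex (declared next). Illustrative guarded
+	// read (a sketch; the code below holds RLock around a full range loop):
+	//
+	//	managedClusterListMutex.RLock()
+	//	version, ok := managedClusterList[clusterName]
+	//	managedClusterListMutex.RUnlock()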
managedClusterListMutex = &sync.RWMutex{} +) + +// PlacementRuleReconciler reconciles a PlacementRule object +type PlacementRuleReconciler struct { + Client client.Client + Log logr.Logger + Scheme *runtime.Scheme + CRDMap map[string]bool + RESTMapper meta.RESTMapper +} + +// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=placementrules,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=placementrules/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=observability.open-cluster-management.io,resources=placementrules/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// Modify the Reconcile function to compare the state specified by +// the PlacementRule object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile +func (r *PlacementRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + reqLogger := log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) + reqLogger.Info("Reconciling PlacementRule") + + if config.GetMonitoringCRName() == "" { + reqLogger.Info("multicluster observability resource is not available") + return ctrl.Result{}, nil + } + + deleteAll := false + // Fetch the MultiClusterObservability instance + mco := &mcov1beta2.MultiClusterObservability{} + err := r.Client.Get(context.TODO(), + types.NamespacedName{ + Name: config.GetMonitoringCRName(), + }, mco) + if err != nil { + if k8serrors.IsNotFound(err) { + deleteAll = true + } else { + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + } + + // Do not reconcile objects if this instance of mch is labeled "paused" + if config.IsPaused(mco.GetAnnotations()) { + reqLogger.Info("MCO reconciliation is paused. Nothing more to do.") + return ctrl.Result{}, nil + } + + // check if the MCH CRD exists + mchCrdExists, _ := r.CRDMap[config.MCHCrdName] + // requeue after 10 seconds if the mch crd exists and image image manifests map is empty + if mchCrdExists && len(config.GetImageManifests()) == 0 { + // if the mch CR is not ready, then requeue the request after 10s + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + + // check if the server certificate for managedcluster + if managedClusterObsCert == nil { + var err error + managedClusterObsCert, err = generateObservabilityServerCACerts(r.Client) + if err != nil && k8serrors.IsNotFound(err) { + // if the servser certificate for managedcluster is not ready, then requeue the request after 10s to avoid useless reconcile loop. 
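+			// Note on the requeue idiom used here and for the MCH check above:
+			// returning ctrl.Result{RequeueAfter: d} with a nil error schedules a
+			// plain retry after d, whereas returning a non-nil error would requeue
+			// with the controller's rate-limited backoff. The two forms, side by side:
+			//
+			//	return ctrl.Result{RequeueAfter: 10 * time.Second}, nil // fixed delay
+			//	return ctrl.Result{}, err                               // exponential backoff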
+ return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + } + + opts := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ownerLabelKey: ownerLabelValue}), + } + if req.Namespace != config.GetDefaultNamespace() && + req.Namespace != "" { + opts.Namespace = req.Namespace + } + + obsAddonList := &mcov1beta1.ObservabilityAddonList{} + err = r.Client.List(context.TODO(), obsAddonList, opts) + if err != nil { + reqLogger.Error(err, "Failed to list observabilityaddon resource") + return ctrl.Result{}, err + } + + if !deleteAll { + res, err := createAllRelatedRes(r.Client, r.RESTMapper, req, mco, obsAddonList, r.CRDMap[config.IngressControllerCRD]) + if err != nil { + return res, err + } + } else { + res, err := deleteAllObsAddons(r.Client, obsAddonList) + if err != nil { + return res, err + } + } + + obsAddonList = &mcov1beta1.ObservabilityAddonList{} + err = r.Client.List(context.TODO(), obsAddonList, opts) + if err != nil { + reqLogger.Error(err, "Failed to list observabilityaddon resource") + return ctrl.Result{}, err + } + workList := &workv1.ManifestWorkList{} + err = r.Client.List(context.TODO(), workList, opts) + if err != nil { + reqLogger.Error(err, "Failed to list manifestwork resource") + return ctrl.Result{}, err + } + managedclusteraddonList := &addonv1alpha1.ManagedClusterAddOnList{} + err = r.Client.List(context.TODO(), managedclusteraddonList, opts) + if err != nil { + reqLogger.Error(err, "Failed to list managedclusteraddon resource") + return ctrl.Result{}, err + } + latestClusters := []string{} + staleAddons := []string{} + for _, addon := range obsAddonList.Items { + latestClusters = append(latestClusters, addon.Namespace) + staleAddons = append(staleAddons, addon.Namespace) + } + for _, work := range workList.Items { + if work.Name != work.Namespace+workNameSuffix { + reqLogger.Info("To delete invalid manifestwork", "name", work.Name, "namespace", work.Namespace) + err = deleteManifestWork(r.Client, work.Name, work.Namespace) + if err != nil { + return ctrl.Result{}, err + } + } + if !commonutil.Contains(latestClusters, work.Namespace) { + reqLogger.Info("To delete manifestwork", "namespace", work.Namespace) + err = deleteManagedClusterRes(r.Client, work.Namespace) + if err != nil { + return ctrl.Result{}, err + } + } else { + staleAddons = commonutil.Remove(staleAddons, work.Namespace) + } + } + + // after the managedcluster is detached, the manifestwork for observability will be delete be the cluster manager, + // but the managedclusteraddon for observability will not deleted by the cluster manager, so check against the + // managedclusteraddon list to remove the managedcluster resources after the managedcluster is detached. 
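+	// Together with the ManifestWork loop above and the staleAddons pass below,
+	// this loop amounts to a simple set reconciliation (roughly):
+	//
+	//	live        = namespaces that own an ObservabilityAddon
+	//	detached    = work/addon namespaces not in live  -> deleteManagedClusterRes
+	//	staleAddons = live namespaces with neither a ManifestWork nor a
+	//	              ManagedClusterAddOn               -> deleteStaleObsAddon(force)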
+ for _, mcaddon := range managedclusteraddonList.Items { + if !commonutil.Contains(latestClusters, mcaddon.Namespace) { + reqLogger.Info("To delete managedcluster resources", "namespace", mcaddon.Namespace) + err = deleteManagedClusterRes(r.Client, mcaddon.Namespace) + if err != nil { + return ctrl.Result{}, err + } + } else { + staleAddons = commonutil.Remove(staleAddons, mcaddon.Namespace) + } + } + + // delete stale addons if manifestwork does not exist + for _, addon := range staleAddons { + err = deleteStaleObsAddon(r.Client, addon, true) + if err != nil { + return ctrl.Result{}, err + } + } + + // only update managedclusteraddon status when obs addon's status updated + if req.Name == obsAddonName { + err = updateAddonStatus(r.Client, *obsAddonList) + if err != nil { + return ctrl.Result{}, err + } + } + + if deleteAll { + opts.Namespace = "" + err = r.Client.List(context.TODO(), workList, opts) + if err != nil { + reqLogger.Error(err, "Failed to list manifestwork resource") + return ctrl.Result{}, err + } + if len(workList.Items) == 0 { + err = deleteGlobalResource(r.Client) + } + } + + return ctrl.Result{}, err +} + +func createAllRelatedRes( + c client.Client, + restMapper meta.RESTMapper, + request ctrl.Request, + mco *mcov1beta2.MultiClusterObservability, + obsAddonList *mcov1beta1.ObservabilityAddonList, + ingressCtlCrdExists bool) (ctrl.Result, error) { + + // create the clusterrole if not there + if !isCRoleCreated { + err := createClusterRole(c) + if err != nil { + return ctrl.Result{}, err + } + err = createResourceRole(c) + if err != nil { + return ctrl.Result{}, err + } + isCRoleCreated = true + } + //Check if ClusterManagementAddon is created or create it + if !isClusterManagementAddonCreated { + err := util.CreateClusterManagementAddon(c) + if err != nil { + return ctrl.Result{}, err + } + isClusterManagementAddonCreated = true + } + + currentClusters := []string{} + for _, ep := range obsAddonList.Items { + currentClusters = append(currentClusters, ep.Namespace) + } + + // need to reload the template and update the the corresponding resources + // the loadTemplates method is now lightweight operations as we have cache the templates in memory. + log.Info("load and update templates for managedcluster resources") + rawExtensionList, obsAddonCRDv1, obsAddonCRDv1beta1, + endpointMetricsOperatorDeploy, imageListConfigMap, _ = loadTemplates(mco) + + works, crdv1Work, crdv1beta1Work, err := generateGlobalManifestResources(c, mco) + if err != nil { + return ctrl.Result{}, err + } + + // regenerate the hubinfo secret if empty + if hubInfoSecret == nil { + var err error + if hubInfoSecret, err = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists); err != nil { + return ctrl.Result{}, err + } + } + + failedCreateManagedClusterRes := false + managedClusterListMutex.RLock() + for managedCluster, openshiftVersion := range managedClusterList { + currentClusters = commonutil.Remove(currentClusters, managedCluster) + // enter the loop for the following reconcile requests: + // 1. MCO CR change(request name is "mco-updated-request") + // 2. MCH resource change(request name is "mch-updated-request"), to handle image replacement in upgrade case. + // 3. configmap/secret... resource change from observability namespace + // 4. managedcluster change(request namespace is emprt string and request name is managedcluster name) + // 5. manifestwork/observabilityaddon/managedclusteraddon/rolebinding... 
change from managedcluster namespace + if request.Name == config.MCOUpdatedRequestName || + request.Name == config.MCHUpdatedRequestName || + request.Namespace == config.GetDefaultNamespace() || + (request.Namespace == "" && request.Name == managedCluster) || + request.Namespace == managedCluster { + log.Info("Monitoring operator should be installed in cluster", "cluster_name", managedCluster, "request.name", request.Name, "request.namespace", request.Namespace) + if openshiftVersion == "3" { + err = createManagedClusterRes(c, restMapper, mco, + managedCluster, managedCluster, + works, crdv1beta1Work, endpointMetricsOperatorDeploy, hubInfoSecret, false) + } else if openshiftVersion == nonOCP { + err = createManagedClusterRes(c, restMapper, mco, + managedCluster, managedCluster, + works, crdv1Work, endpointMetricsOperatorDeploy, hubInfoSecret, true) + } else { + err = createManagedClusterRes(c, restMapper, mco, + managedCluster, managedCluster, + works, crdv1Work, endpointMetricsOperatorDeploy, hubInfoSecret, false) + } + if err != nil { + failedCreateManagedClusterRes = true + log.Error(err, "Failed to create managedcluster resources", "namespace", managedCluster) + } + if request.Namespace == managedCluster { + break + } + } + } + managedClusterListMutex.RUnlock() + + failedDeleteOba := false + for _, cluster := range currentClusters { + log.Info("To delete observabilityAddon", "namespace", cluster) + err = deleteObsAddon(c, cluster) + if err != nil { + failedDeleteOba = true + log.Error(err, "Failed to delete observabilityaddon", "namespace", cluster) + } + } + + if failedCreateManagedClusterRes || failedDeleteOba { + return ctrl.Result{}, errors.New("Failed to create managedcluster resources or" + + " failed to delete observabilityaddon, skip and reconcile later") + } + + return ctrl.Result{}, nil +} + +func deleteAllObsAddons( + client client.Client, + obsAddonList *mcov1beta1.ObservabilityAddonList) (ctrl.Result, error) { + for _, ep := range obsAddonList.Items { + err := deleteObsAddon(client, ep.Namespace) + if err != nil { + log.Error(err, "Failed to delete observabilityaddon", "namespace", ep.Namespace) + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +func deleteGlobalResource(c client.Client) error { + err := deleteClusterRole(c) + if err != nil { + return err + } + err = deleteResourceRole(c) + if err != nil { + return err + } + isCRoleCreated = false + //delete ClusterManagementAddon + err = util.DeleteClusterManagementAddon(c) + if err != nil { + return err + } + isClusterManagementAddonCreated = false + return nil +} + +func createManagedClusterRes(client client.Client, restMapper meta.RESTMapper, + mco *mcov1beta2.MultiClusterObservability, name string, namespace string, + works []workv1.Manifest, crdWork *workv1.Manifest, dep *appsv1.Deployment, + hubInfo *corev1.Secret, installProm bool) error { + err := createObsAddon(client, namespace) + if err != nil { + log.Error(err, "Failed to create observabilityaddon") + return err + } + + err = createRolebindings(client, namespace, name) + if err != nil { + return err + } + + err = createManifestWorks(client, restMapper, namespace, name, mco, works, crdWork, dep, hubInfo, installProm) + if err != nil { + log.Error(err, "Failed to create manifestwork") + return err + } + + err = util.CreateManagedClusterAddonCR(client, namespace, ownerLabelKey, ownerLabelValue) + if err != nil { + log.Error(err, "Failed to create ManagedClusterAddon") + return err + } + + return nil +} + +func deleteManagedClusterRes(c 
client.Client, namespace string) error { + + managedclusteraddon := &addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.ManagedClusterAddonName, + Namespace: namespace, + }, + } + err := c.Delete(context.TODO(), managedclusteraddon) + if err != nil && !k8serrors.IsNotFound(err) { + log.Error(err, "Failed to delete managedclusteraddon") + return err + } + + err = deleteRolebindings(c, namespace) + if err != nil { + return err + } + + err = deleteManifestWorks(c, namespace) + if err != nil { + log.Error(err, "Failed to delete manifestwork") + return err + } + return nil +} + +func updateManagedClusterList(obj client.Object) { + managedClusterListMutex.Lock() + defer managedClusterListMutex.Unlock() + if version, ok := obj.GetLabels()["openshiftVersion"]; ok { + managedClusterList[obj.GetName()] = version + } else { + managedClusterList[obj.GetName()] = nonOCP + } +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { + c := mgr.GetClient() + ingressCtlCrdExists, _ := r.CRDMap[config.IngressControllerCRD] + clusterPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + log.Info("CreateFunc", "managedCluster", e.Object.GetName()) + updateManagedClusterList(e.Object) + updateManagedClusterImageRegistry(e.Object) + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + log.Info("UpdateFunc", "managedCluster", e.ObjectNew.GetName()) + if e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { + if e.ObjectNew.GetDeletionTimestamp() != nil { + log.Info("managedcluster is in terminating state", "managedCluster", e.ObjectNew.GetName()) + managedClusterListMutex.Lock() + delete(managedClusterList, e.ObjectNew.GetName()) + managedClusterListMutex.Unlock() + managedClusterImageRegistryMutex.Lock() + delete(managedClusterImageRegistry, e.ObjectNew.GetName()) + managedClusterImageRegistryMutex.Unlock() + } else { + updateManagedClusterList(e.ObjectNew) + updateManagedClusterImageRegistry(e.ObjectNew) + } + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + log.Info("DeleteFunc", "managedCluster", e.Object.GetName()) + managedClusterListMutex.Lock() + delete(managedClusterList, e.Object.GetName()) + managedClusterListMutex.Unlock() + managedClusterImageRegistryMutex.Lock() + delete(managedClusterImageRegistry, e.Object.GetName()) + managedClusterImageRegistryMutex.Unlock() + return true + }, + } + + obsAddonPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetName() == obsAddonName && + e.ObjectNew.GetLabels()[ownerLabelKey] == ownerLabelValue && + !reflect.DeepEqual(e.ObjectNew.(*mcov1beta1.ObservabilityAddon).Status.Conditions, + e.ObjectOld.(*mcov1beta1.ObservabilityAddon).Status.Conditions) { + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if e.Object.GetName() == obsAddonName && + e.Object.GetLabels()[ownerLabelKey] == ownerLabelValue { + log.Info("DeleteFunc", "obsAddonNamespace", e.Object.GetNamespace(), "obsAddonName", e.Object.GetName()) + /* #nosec */ + removePostponeDeleteAnnotationForManifestwork(c, e.Object.GetNamespace()) + return true + } + return false + }, + } + + mcoPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // generate the image pull secret + pullSecret, _ = generatePullSecret(c, 
config.GetImagePullSecret(e.Object.(*mcov1beta2.MultiClusterObservability).Spec)) + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + // only reconcile when ObservabilityAddonSpec updated + if e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && + !reflect.DeepEqual(e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec.ObservabilityAddonSpec, + e.ObjectOld.(*mcov1beta2.MultiClusterObservability).Spec.ObservabilityAddonSpec) { + if e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec.ImagePullSecret != e.ObjectOld.(*mcov1beta2.MultiClusterObservability).Spec.ImagePullSecret { + // regenerate the image pull secret + pullSecret, _ = generatePullSecret(c, config.GetImagePullSecret(e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec)) + } + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return true + }, + } + + customAllowlistPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetName() == config.AllowlistCustomConfigMapName && + e.Object.GetNamespace() == config.GetDefaultNamespace() { + // generate the metrics allowlist configmap + log.Info("generate metric allow list configmap for custom configmap CREATE") + metricsAllowlistConfigMap, _ = generateMetricsListCM(c) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetName() == config.AllowlistCustomConfigMapName && + e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { + // regenerate the metrics allowlist configmap + log.Info("generate metric allow list configmap for custom configmap UPDATE") + metricsAllowlistConfigMap, _ = generateMetricsListCM(c) + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if e.Object.GetName() == config.AllowlistCustomConfigMapName && + e.Object.GetNamespace() == config.GetDefaultNamespace() { + // regenerate the metrics allowlist configmap + log.Info("generate metric allow list configmap for custom configmap UPDATE") + metricsAllowlistConfigMap, _ = generateMetricsListCM(c) + return true + } + return false + }, + } + + certSecretPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetName() == config.ServerCACerts && + e.Object.GetNamespace() == config.GetDefaultNamespace() { + // generate the certificate for managed cluster + log.Info("generate managedcluster observability certificate for server certificate CREATE") + managedClusterObsCert, _ = generateObservabilityServerCACerts(c) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if (e.ObjectNew.GetName() == config.ServerCACerts && + e.ObjectNew.GetNamespace() == config.GetDefaultNamespace()) && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { + // regenerate the certificate for managed cluster + log.Info("generate managedcluster observability certificate for server certificate UPDATE") + managedClusterObsCert, _ = generateObservabilityServerCACerts(c) + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } + + ingressControllerPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetName() == config.OpenshiftIngressOperatorCRName && + e.Object.GetNamespace() == config.OpenshiftIngressOperatorNamespace { + // generate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, 
config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetName() == config.OpenshiftIngressOperatorCRName && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && + e.ObjectNew.GetNamespace() == config.OpenshiftIngressOperatorNamespace { + // regenerate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if e.Object.GetName() == config.OpenshiftIngressOperatorCRName && + e.Object.GetNamespace() == config.OpenshiftIngressOperatorNamespace { + // regenerate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + } + + amRouterCertSecretPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetNamespace() == config.GetDefaultNamespace() && + (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || + e.Object.GetName() == config.AlertmanagerRouteBYOCERTName) { + // generate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && + (e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCAName || + e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCERTName) { + // regenerate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + if e.Object.GetNamespace() == config.GetDefaultNamespace() && + (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || + e.Object.GetName() == config.AlertmanagerRouteBYOCERTName) { + // regenerate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + } + + routeCASecretPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if (e.Object.GetNamespace() == config.OpenshiftIngressOperatorNamespace && + e.Object.GetName() == config.OpenshiftIngressRouteCAName) || + (e.Object.GetNamespace() == config.OpenshiftIngressNamespace && + e.Object.GetName() == config.OpenshiftIngressDefaultCertName) { + // generate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if ((e.ObjectNew.GetNamespace() == config.OpenshiftIngressOperatorNamespace && + e.ObjectNew.GetName() == config.OpenshiftIngressRouteCAName) || + (e.ObjectNew.GetNamespace() == config.OpenshiftIngressNamespace && + e.ObjectNew.GetName() == config.OpenshiftIngressDefaultCertName)) && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { + // regenerate the hubInfo secret + hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } + + 
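+	// The predicate.Funcs in this function all follow the same shape: filter the
+	// watch event, optionally refresh a cached object (hubInfoSecret,
+	// managedClusterObsCert, metricsAllowlistConfigMap, ...), and return true only
+	// when a reconcile should be enqueued. Minimal shape of such a filter
+	// (illustrative; "interesting" is a placeholder, not a helper in this file):
+	//
+	//	pred := predicate.Funcs{
+	//		CreateFunc: func(e event.CreateEvent) bool { return interesting(e.Object) },
+	//		UpdateFunc: func(e event.UpdateEvent) bool {
+	//			return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion()
+	//		},
+	//		DeleteFunc: func(e event.DeleteEvent) bool { return false },
+	//	}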
amAccessorSAPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + if e.Object.GetName() == config.AlertmanagerAccessorSAName && + e.Object.GetNamespace() == config.GetDefaultNamespace() { + // wait 10s for access_token of alertmanager and generate the secret that contains the access_token + /* #nosec */ + wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) { + var err error + log.Info("generate amAccessorTokenSecret for alertmanager access serviceaccount CREATE") + if amAccessorTokenSecret, err = generateAmAccessorTokenSecret(c); err == nil { + return true, nil + } + return false, err + }) + return true + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if (e.ObjectNew.GetName() == config.AlertmanagerAccessorSAName && + e.ObjectNew.GetNamespace() == config.GetDefaultNamespace()) && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { + // regenerate the secret that contains the access_token for the Alertmanager in the Hub cluster + amAccessorTokenSecret, _ = generateAmAccessorTokenSecret(c) + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } + + ctrBuilder := ctrl.NewControllerManagedBy(mgr). + // Watch for changes to primary resource ManagedCluster with predicate + For(&clusterv1.ManagedCluster{}, builder.WithPredicates(clusterPred)). + // secondary watch for observabilityaddon + Watches(&source.Kind{Type: &mcov1beta1.ObservabilityAddon{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(obsAddonPred)). + // secondary watch for MCO + Watches(&source.Kind{Type: &mcov1beta2.MultiClusterObservability{}}, handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: config.MCOUpdatedRequestName, + }}, + } + }), builder.WithPredicates(mcoPred)). + // secondary watch for custom allowlist configmap + Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(customAllowlistPred)). + // secondary watch for certificate secrets + Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(certSecretPred)). 
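+		// The MultiClusterObservability watch above does not enqueue the changed
+		// CR itself: handler.EnqueueRequestsFromMapFunc collapses every MCO event
+		// into one synthetic request named config.MCOUpdatedRequestName
+		// ("mco-updated-request", per the comment in createAllRelatedRes), which
+		// Reconcile then recognizes by name. The mapping reduces to:
+		//
+		//	func(obj client.Object) []reconcile.Request {
+		//		return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: config.MCOUpdatedRequestName}}}
+		//	}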
+ // secondary watch for alertmanager accessor serviceaccount + Watches(&source.Kind{Type: &corev1.ServiceAccount{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(amAccessorSAPred)) + + manifestWorkGroupKind := schema.GroupKind{Group: workv1.GroupVersion.Group, Kind: "ManifestWork"} + if _, err := r.RESTMapper.RESTMapping(manifestWorkGroupKind, workv1.GroupVersion.Version); err == nil { + workPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetLabels()[ownerLabelKey] == ownerLabelValue && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && + !reflect.DeepEqual(e.ObjectNew.(*workv1.ManifestWork).Spec.Workload.Manifests, + e.ObjectOld.(*workv1.ManifestWork).Spec.Workload.Manifests) { + return true + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return e.Object.GetLabels()[ownerLabelKey] == ownerLabelValue + }, + } + + // secondary watch for manifestwork + ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &workv1.ManifestWork{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(workPred)) + } + + mchGroupKind := schema.GroupKind{Group: mchv1.SchemeGroupVersion.Group, Kind: "MultiClusterHub"} + if _, err := r.RESTMapper.RESTMapping(mchGroupKind, mchv1.SchemeGroupVersion.Version); err == nil { + mchPred := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + // this is for operator restart, the mch CREATE event will be caught and the mch should be ready + if e.Object.GetNamespace() == config.GetMCONamespace() && + e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && + e.Object.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion { + // only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap(c, e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion) + if err != nil { + return false + } + return ok + } + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + if e.ObjectNew.GetNamespace() == config.GetMCONamespace() && + e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && + e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && + e.ObjectNew.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion { + /// only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap(c, e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion) + if err != nil { + return false + } + return ok + } + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + } + + if ingressCtlCrdExists { + // secondary watch for default ingresscontroller + ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &operatorv1.IngressController{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(ingressControllerPred)). + // secondary watch for alertmanager route byo cert secrets + Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(amRouterCertSecretPred)). 
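+			// Several of these Watches target corev1.Secret with different
+			// predicates (certSecretPred, amRouterCertSecretPred, routeCASecretPred);
+			// they share the manager's cached informer for Secrets, each predicate
+			// simply filtering which secret events reach the work queue.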
+ // secondary watch for openshift route ca secret + Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(routeCASecretPred)) + } + + mchCrdExists, _ := r.CRDMap[config.MCHCrdName] + if mchCrdExists { + // secondary watch for MCH + ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &mchv1.MultiClusterHub{}}, handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: config.MCHUpdatedRequestName, + Namespace: obj.GetNamespace(), + }}, + } + }), builder.WithPredicates(mchPred)) + } + } + + // create and return a new controller + return ctrBuilder.Complete(r) +} + +func StartPlacementController(mgr manager.Manager, crdMap map[string]bool) error { + if isplacementControllerRunnning { + return nil + } + isplacementControllerRunnning = true + + if err := (&PlacementRuleReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("PlacementRule"), + Scheme: mgr.GetScheme(), + CRDMap: crdMap, + RESTMapper: mgr.GetRESTMapper(), + }).SetupWithManager(mgr); err != nil { + log.Error(err, "unable to create controller", "controller", "PlacementRule") + return err + } + + return nil +} diff --git a/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go new file mode 100644 index 000000000..907bf9750 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go @@ -0,0 +1,332 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "os" + "path" + "strings" + "testing" + + ocinfrav1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + imageregistryv1alpha1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/imageregistry/v1alpha1" + mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering/templates" + mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" +) + +const ( + namespace = "test-ns" + namespace2 = "test-ns-2" + clusterName = "cluster1" + clusterName2 = "cluster2" + mcoName = "test-mco" +) + +var ( + mcoNamespace = config.GetDefaultNamespace() +) + +func initSchema(t *testing.T) { + s := scheme.Scheme + if err := clusterv1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add placementrule scheme: (%v)", err) + } + 
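+	// The fake client used throughout these tests can only serve types whose API
+	// groups are registered here; a missing AddToScheme typically fails with
+	// "no kind is registered for the type ..." on the first Get or Create. Note
+	// that scheme.Scheme is the global client-go scheme, so this registration is
+	// shared by every test in the package. Registering a further group would
+	// follow the same pattern, e.g. (sketch):
+	//
+	//	if err := addonv1alpha1.AddToScheme(s); err != nil {
+	//		t.Fatalf("Unable to add addonv1alpha1 scheme: (%v)", err)
+	//	}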
if err := mcov1beta2.AddToScheme(s); err != nil { + t.Fatalf("Unable to add mcov1beta2 scheme: (%v)", err) + } + if err := mcov1beta1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add mcov1beta1 scheme: (%v)", err) + } + if err := routev1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add routev1 scheme: (%v)", err) + } + if err := operatorv1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add routev1 scheme: (%v)", err) + } + if err := ocinfrav1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add ocinfrav1 scheme: (%v)", err) + } + if err := workv1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add workv1 scheme: (%v)", err) + } + if err := mchv1.SchemeBuilder.AddToScheme(s); err != nil { + t.Fatalf("Unable to add mchv1 scheme: (%v)", err) + } + if err := imageregistryv1alpha1.AddToScheme(s); err != nil { + t.Fatalf("Unable to add imageregistryv1alpha1 scheme: (%v)", err) + } +} + +var testImagemanifestsMap = map[string]string{ + "endpoint_monitoring_operator": "test.io/endpoint-monitoring:test", + "metrics_collector": "test.io/metrics-collector:test", +} + +func newTestImageManifestsConfigMap(namespace, version string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.ImageManifestConfigMapNamePrefix + version, + Namespace: namespace, + Labels: map[string]string{ + config.OCMManifestConfigMapTypeLabelKey: config.OCMManifestConfigMapTypeLabelValue, + config.OCMManifestConfigMapVersionLabelKey: version, + }, + }, + Data: testImagemanifestsMap, + } +} + +func newMCHInstanceWithVersion(namespace, version string) *mchv1.MultiClusterHub { + return &mchv1.MultiClusterHub{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: namespace, + }, + Spec: mchv1.MultiClusterHubSpec{}, + Status: mchv1.MultiClusterHubStatus{ + CurrentVersion: version, + DesiredVersion: version, + }, + } +} + +func TestObservabilityAddonController(t *testing.T) { + s := scheme.Scheme + addonv1alpha1.AddToScheme(s) + initSchema(t) + config.SetMonitoringCRName(mcoName) + mco := newTestMCO() + pull := newTestPullSecret() + deprecatedRole := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "endpoint-observability-role", + Namespace: namespace, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + } + objs := []runtime.Object{mco, pull, newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestIngressController(), newTestRouteCASecret(), newCASecret(), newCertSecret(mcoNamespace), NewMetricsAllowListCM(), + NewAmAccessorSA(), NewAmAccessorTokenSecret(), newManagedClusterAddon(), deprecatedRole} + c := fake.NewFakeClient(objs...) 
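+	// fake.NewFakeClient works here, but newer controller-runtime releases mark it
+	// deprecated in favour of the builder form, roughly (a sketch, not used in
+	// this patch):
+	//
+	//	c := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(objs...).Build()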
+ r := &PlacementRuleReconciler{Client: c, Scheme: s, CRDMap: map[string]bool{config.IngressControllerCRD: true}} + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Failed to get work dir: (%v)", err) + } + os.MkdirAll(path.Join(wd, "../../placementrule-tests"), 0755) + testManifestsPath := path.Join(wd, "../../placementrule-tests/manifests") + manifestsPath := path.Join(wd, "../../manifests") + os.Setenv("TEMPLATES_PATH", testManifestsPath) + templates.ResetTemplates() + err = os.Symlink(manifestsPath, testManifestsPath) + if err != nil { + t.Fatalf("Failed to create symbollink(%s) to(%s) for the test manifests: (%v)", testManifestsPath, manifestsPath, err) + } + + req := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: config.GetDefaultCRName(), + Namespace: mcoNamespace, + }, + } + + managedClusterList = map[string]string{ + namespace: "4", + namespace2: "4", + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + found := &workv1.ManifestWork{} + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace + workNameSuffix, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get manifestwork %s: (%v)", namespace, err) + } + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace2 + workNameSuffix, Namespace: namespace2}, found) + if err != nil { + t.Fatalf("Failed to get manifestwork for %s: (%v)", namespace2, err) + } + foundRole := &rbacv1.Role{} + err = c.Get(context.TODO(), types.NamespacedName{Name: "endpoint-observability-role", Namespace: namespace}, foundRole) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("Deprecated role not removed") + } + + managedClusterList = map[string]string{namespace: "4"} + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace2 + workNameSuffix, Namespace: namespace2}, found) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("Failed to delete manifestwork for cluster2: (%v)", err) + } + + err = c.Delete(context.TODO(), pull) + if err != nil { + t.Fatalf("Failed to delete pull secret: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + err = c.Delete(context.TODO(), mco) + if err != nil { + t.Fatalf("Failed to delete mco: (%v)", err) + } + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + foundList := &workv1.ManifestWorkList{} + err = c.List(context.TODO(), foundList) + if err != nil { + t.Fatalf("Failed to list manifestwork: (%v)", err) + } + if len(foundList.Items) != 0 { + t.Fatalf("Not all manifestwork removed after remove mco resource") + } + + mco.ObjectMeta.ResourceVersion = "" + err = c.Create(context.TODO(), mco) + if err != nil { + t.Fatalf("Failed to create mco: (%v)", err) + } + + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace + workNameSuffix, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to get manifestwork for cluster1: (%v)", err) + } + + invalidName := "invalid-work" + invalidWork := &workv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Name: invalidName, + Namespace: namespace, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + } + err = c.Create(context.TODO(), invalidWork) + if err != nil { + t.Fatalf("Failed to create 
manifestwork: (%v)", err) + } + + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + err = c.Get(context.TODO(), types.NamespacedName{Name: invalidName, Namespace: namespace}, found) + if err == nil { + t.Fatalf("Invalid manifestwork not removed") + } + + // test mch update and image replacement + version := "2.4.0" + imageManifestsCM := newTestImageManifestsConfigMap(config.GetMCONamespace(), version) + err = c.Create(context.TODO(), imageManifestsCM) + if err != nil { + t.Fatalf("Failed to create the testing image manifest configmap: (%v)", err) + } + + testMCHInstance := newMCHInstanceWithVersion(config.GetMCONamespace(), version) + err = c.Create(context.TODO(), testMCHInstance) + if err != nil { + t.Fatalf("Failed to create the testing mch instance: (%v)", err) + } + + req = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: config.MCHUpdatedRequestName, + }, + } + + ok, err := config.ReadImageManifestConfigMap(c, testMCHInstance.Status.CurrentVersion) + if err != nil || !ok { + t.Fatalf("Failed to read image manifest configmap: (%T,%v)", ok, err) + } + + // set the MCHCrdName for the reconciler + r.CRDMap[config.MCHCrdName] = true + _, err = r.Reconcile(context.TODO(), req) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + + foundManifestwork := &workv1.ManifestWork{} + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace + workNameSuffix, Namespace: namespace}, foundManifestwork) + if err != nil { + t.Fatalf("Failed to get manifestwork %s: (%v)", namespace, err) + } + for _, w := range foundManifestwork.Spec.Workload.Manifests { + var rawBytes []byte + rawBytes, err := w.RawExtension.Marshal() + if err != nil { + t.Fatalf("Failed to marshal RawExtension: (%v)", err) + } + rawStr := string(rawBytes) + // make sure the image for endpoint-metrics-operator is updated + if strings.Contains(rawStr, "Deployment") { + t.Logf("raw string: \n%s\n", rawStr) + if !strings.Contains(rawStr, "test.io/endpoint-monitoring:test") { + t.Fatalf("the image for endpoint-metrics-operator should be replaced with: test.io/endpoint-monitoring:test") + } + } + // make sure the images-list configmap is updated + if strings.Contains(rawStr, "images-list") { + t.Logf("raw string: \n%s\n", rawStr) + if !strings.Contains(rawStr, "test.io/metrics-collector:test") { + t.Fatalf("the image for endpoint-metrics-operator should be replaced with: test.io/endpoint-monitoring:test") + } + } + } + + // remove the testing manifests directory + if err = os.Remove(testManifestsPath); err != nil { + t.Fatalf("Failed to delete symbollink(%s) for the test manifests: (%v)", testManifestsPath, err) + } + os.Remove(path.Join(wd, "../../placementrule-tests")) +} + +func newManagedClusterAddon() *addonv1alpha1.ManagedClusterAddOn { + return &addonv1alpha1.ManagedClusterAddOn{ + TypeMeta: metav1.TypeMeta{ + APIVersion: addonv1alpha1.SchemeGroupVersion.String(), + Kind: "ManagedClusterAddOn", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "managedClusterAddonName", + Namespace: namespace, + }, + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/role.go b/operators/multiclusterobservability/controllers/placementrule/role.go new file mode 100644 index 000000000..ee5aacc15 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/role.go @@ -0,0 +1,385 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "fmt" + "reflect" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +const ( + addonName = "observability-controller" + resRoleName = "endpoint-observability-res-role" + resRoleBindingName = "endpoint-observability-res-rolebinding" + mcoRoleName = "endpoint-observability-mco-role" + mcoRoleBindingName = "endpoint-observability-mco-rolebinding" + epRsName = "observabilityaddons" + epStatusRsName = "observabilityaddons/status" +) + +func createClusterRole(c client.Client) error { + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoRoleName, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + config.MCORsName, + }, + Verbs: []string{ + "watch", + "list", + "get", + }, + APIGroups: []string{ + mcov1beta2.GroupVersion.Group, + }, + }, + }, + } + + found := &rbacv1.ClusterRole{} + err := c.Get(context.TODO(), types.NamespacedName{Name: mcoRoleName}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating mco clusterRole") + err = c.Create(context.TODO(), role) + if err != nil { + log.Error(err, "Failed to create endpoint-observability-mco-role clusterRole") + return err + } + return nil + } else if err != nil { + log.Error(err, "Failed to check endpoint-observability-mco-role clusterRole") + return err + } + + if !reflect.DeepEqual(found.Rules, role.Rules) { + log.Info("Updating endpoint-observability-mco-role clusterRole") + role.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = c.Update(context.TODO(), role) + if err != nil { + log.Error(err, "Failed to update endpoint-observability-mco-role clusterRole") + return err + } + return nil + } + + log.Info("clusterrole endpoint-observability-mco-role already existed/unchanged") + return nil +} + +func createClusterRoleBinding(c client.Client, namespace string, name string) error { + rb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace + "-" + mcoRoleBindingName, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: mcoRoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "Group", + Name: fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s", name, addonName), + Namespace: namespace, + }, + }, + } + found := &rbacv1.ClusterRoleBinding{} + err := c.Get(context.TODO(), types.NamespacedName{Name: namespace + "-" + + mcoRoleBindingName}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating endpoint-observability-mco-rolebinding clusterrolebinding") + err = c.Create(context.TODO(), rb) + if err != nil { + log.Error(err, "Failed to create endpoint-observability-mco-rolebinding clusterrolebinding") + return err + } + return nil + } else if err != nil { + log.Error(err, "Failed to check endpoint-observability-mco-rolebinding clusterrolebinding") + return err + } + + if !reflect.DeepEqual(found.Subjects, 
rb.Subjects) && !reflect.DeepEqual(found.RoleRef, rb.RoleRef) { + log.Info("Updating endpoint-observability-mco-rolebinding clusterrolebinding") + rb.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = c.Update(context.TODO(), rb) + if err != nil { + log.Error(err, "Failed to update endpoint-observability-mco-rolebinding clusterrolebinding") + return err + } + return nil + } + + log.Info("clusterrolebinding endpoint-observability-mco-rolebinding already existed/unchanged", "namespace", namespace) + return nil +} + +func createResourceRole(c client.Client) error { + + deleteDeprecatedRoles(c) + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: resRoleName, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + epRsName, + epStatusRsName, + }, + Verbs: []string{ + "watch", + "list", + "get", + "update", + }, + APIGroups: []string{ + mcov1beta2.GroupVersion.Group, + }, + }, + { + Resources: []string{ + "pods", + }, + Verbs: []string{ + "watch", + "list", + "get", + }, + APIGroups: []string{ + "", + }, + }, + { + Resources: []string{ + "managedclusteraddons", + "managedclusteraddons/status", + }, + Verbs: []string{ + "watch", + "list", + "get", + "update", + }, + APIGroups: []string{ + "addon.open-cluster-management.io", + }, + }, + { + Resources: []string{ + "leases", + }, + Verbs: []string{ + "watch", + "list", + "get", + "update", + "create", + "delete", + }, + APIGroups: []string{ + "coordination.k8s.io", + }, + }, + }, + } + + found := &rbacv1.ClusterRole{} + err := c.Get(context.TODO(), types.NamespacedName{Name: resRoleName}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating endpoint-observability-res-role clusterrole") + err = c.Create(context.TODO(), role) + if err != nil { + log.Error(err, "Failed to create endpoint-observability-res-role clusterrole") + return err + } + return nil + } else if err != nil { + log.Error(err, "Failed to check endpoint-observability-res-role clusterrole") + return err + } + + if !reflect.DeepEqual(found.Rules, role.Rules) { + log.Info("Updating endpoint-observability-res-role clusterrole") + role.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = c.Update(context.TODO(), role) + if err != nil { + log.Error(err, "Failed to update endpoint-observability-res-role clusterrole") + return err + } + return nil + } + + log.Info("clusterrole endpoint-observability-res-role already existed/unchanged") + return nil +} + +func createResourceRoleBinding(c client.Client, namespace string, name string) error { + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: resRoleBindingName, + Namespace: namespace, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: resRoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "Group", + Name: fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s", name, addonName), + Namespace: namespace, + }, + }, + } + found := &rbacv1.RoleBinding{} + err := c.Get(context.TODO(), types.NamespacedName{Name: resRoleBindingName, Namespace: namespace}, found) + if err != nil && errors.IsNotFound(err) { + log.Info("Creating endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) + err = c.Create(context.TODO(), rb) + if err != nil { + log.Error(err, "Failed to create endpoint-observability-res-rolebinding rolebinding", "namespace", 
namespace) + return err + } + return nil + } else if err != nil { + log.Error(err, "Failed to check endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) + return err + } + + if !reflect.DeepEqual(found.Subjects, rb.Subjects) && !reflect.DeepEqual(found.RoleRef, rb.RoleRef) { + log.Info("Updating endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) + rb.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = c.Update(context.TODO(), rb) + if err != nil { + log.Error(err, "Failed to update endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) + return err + } + return nil + } + + log.Info("rolebinding endpoint-observability-res-rolebinding already existed/unchanged", "namespace", namespace) + return nil +} + +func deleteClusterRole(c client.Client) error { + clusterrole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoRoleName, + }, + } + err := c.Delete(context.TODO(), clusterrole) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Failed to delete clusterrole", "name", mcoRoleName) + return err + } + log.Info("Clusterrole deleted", "name", mcoRoleName) + return nil +} + +func deleteResourceRole(c client.Client) error { + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: resRoleName, + }, + } + err := c.Delete(context.TODO(), role) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Failed to delete clusterrole", "name", resRoleName) + return err + } + log.Info("Role deleted", "name", resRoleName) + return nil +} + +func deleteRolebindings(c client.Client, namespace string) error { + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace + "-" + resRoleBindingName, + }, + } + err := c.Delete(context.TODO(), crb) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Failed to delete clusterrolebinding", "name", namespace+"-"+resRoleBindingName) + return err + } + log.Info("Clusterrolebinding deleted", "name", namespace+"-"+resRoleBindingName) + + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: resRoleBindingName, + Namespace: namespace, + }, + } + err = c.Delete(context.TODO(), rb) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Failed to delete rolebinding", "name", resRoleBindingName, "namespace", namespace) + return err + } + log.Info("Rolebinding deleted", "name", resRoleBindingName, "namespace", namespace) + + return nil +} + +// function to remove the deprecated roles +func deleteDeprecatedRoles(c client.Client) { + opts := &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ownerLabelKey: ownerLabelValue}), + } + roleList := &rbacv1.RoleList{} + err := c.List(context.TODO(), roleList, opts) + if err != nil { + log.Error(err, "Failed to list deprecated roles") + return + } + for idx := range roleList.Items { + role := roleList.Items[idx] + if role.Name == "endpoint-observability-role" { + err = c.Delete(context.TODO(), &role) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Failed to delete deprecated roles", "name", role.Name, "namespace", role.Namespace) + } else { + log.Info("Deprecated role deleted", "name", role.Name, "namespace", role.Namespace) + } + } + } +} + +func createRolebindings(c client.Client, namespace string, name string) error { + err := createClusterRoleBinding(c, namespace, name) + if err != nil { + return err + } + err = createResourceRoleBinding(c, namespace, name) + return err +} diff --git 
a/operators/multiclusterobservability/controllers/placementrule/role_test.go b/operators/multiclusterobservability/controllers/placementrule/role_test.go new file mode 100644 index 000000000..c1aa77084 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/role_test.go @@ -0,0 +1,224 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "fmt" + "testing" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +const ( + secretName = "test-secret" + token = "test-token" + ca = "test-ca" +) + +func TestCreateClusterRole(t *testing.T) { + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoRoleName, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + config.MCORsName, + }, + Verbs: []string{ + "watch", + "list", + "get", + }, + APIGroups: []string{ + mcov1beta2.GroupVersion.Group, + }, + }, + { + Resources: []string{ + "pods", + }, + Verbs: []string{ + "watch", + }, + APIGroups: []string{ + "", + }, + }, + }, + } + objs := []runtime.Object{role} + c := fake.NewFakeClient(objs...) + err := createClusterRole(c) + if err != nil { + t.Fatalf("createRole: (%v)", err) + } + found := &rbacv1.ClusterRole{} + err = c.Get(context.TODO(), types.NamespacedName{Name: mcoRoleName}, found) + if err != nil { + t.Fatalf("Failed to update mcoClusterRole: (%v)", err) + } + if len(found.Rules) != 1 { + t.Fatalf("role is not updated correctly") + } +} + +func TestCreateClusterRoleBinding(t *testing.T) { + rb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace + "-" + mcoRoleBindingName, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: mcoRoleName + "-test", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "Group", + Name: "test", + Namespace: namespace, + }, + }, + } + objs := []runtime.Object{rb} + c := fake.NewFakeClient(objs...) 
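+ // The binding seeded above deliberately carries a mismatched RoleRef (mcoRoleName + "-test") and a placeholder subject, so the call below is expected to reconcile the existing ClusterRoleBinding back to the desired RoleRef and addon group subject rather than create a new one.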
+ err := createClusterRoleBinding(c, namespace, namespace) + if err != nil { + t.Fatalf("createRoleBinding: (%v)", err) + } + found := &rbacv1.ClusterRoleBinding{} + err = c.Get(context.TODO(), types.NamespacedName{Name: namespace + "-" + mcoRoleBindingName}, found) + if err != nil { + t.Fatalf("Failed to update ClusterRoleBinding: (%v)", err) + } + if found.RoleRef.Name != mcoRoleName || + found.Subjects[0].Name != fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s", namespace, addonName) { + t.Fatalf("clusterrolebinding is not updated correctly") + } +} + +func TestCreateRole(t *testing.T) { + c := fake.NewFakeClient() + err := createResourceRole(c) + if err != nil { + t.Fatalf("createRole: (%v)", err) + } + found := &rbacv1.ClusterRole{} + err = c.Get(context.TODO(), types.NamespacedName{Name: resRoleName, Namespace: ""}, found) + if err != nil { + t.Fatalf("Failed to create Role: (%v)", err) + } + if len(found.Rules) != 4 { + t.Fatalf("role is not created correctly") + } + + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: resRoleName, + Namespace: namespace, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + epRsName, + }, + Verbs: []string{ + "watch", + "list", + "get", + "update", + }, + APIGroups: []string{ + mcov1beta2.GroupVersion.Group, + }, + }, + }, + } + objs := []runtime.Object{role} + c = fake.NewFakeClient(objs...) + err = createResourceRole(c) + if err != nil { + t.Fatalf("createRole: (%v)", err) + } + found = &rbacv1.ClusterRole{} + err = c.Get(context.TODO(), types.NamespacedName{Name: resRoleName, Namespace: ""}, found) + if err != nil { + t.Fatalf("Failed to update Role: (%v)", err) + } + if len(found.Rules) != 4 { + t.Fatalf("role is not updated correctly") + } +} + +func TestCreateRoleBinding(t *testing.T) { + c := fake.NewFakeClient() + err := createResourceRoleBinding(c, namespace, namespace) + if err != nil { + t.Fatalf("createRole: (%v)", err) + } + found := &rbacv1.RoleBinding{} + err = c.Get(context.TODO(), types.NamespacedName{Name: resRoleBindingName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to create RoleBinding: (%v)", err) + } + if found.RoleRef.Name != resRoleName || + found.Subjects[0].Name != fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s", namespace, addonName) { + t.Fatalf("rolebinding is not created correctly") + } + + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: resRoleBindingName, + Namespace: namespace, + Labels: map[string]string{ + ownerLabelKey: ownerLabelValue, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: resRoleName + "-test", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "Group", + Name: "test", + Namespace: namespace, + }, + }, + } + objs := []runtime.Object{rb} + c = fake.NewFakeClient(objs...) 
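+ // As above, the seeded RoleBinding points at a mismatched RoleRef ("Role" kind with a "-test" suffix) and subject, so createResourceRoleBinding is expected to update the existing binding in place.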
+ err = createResourceRoleBinding(c, namespace, namespace) + if err != nil { + t.Fatalf("createRoleBinding: (%v)", err) + } + found = &rbacv1.RoleBinding{} + err = c.Get(context.TODO(), types.NamespacedName{Name: resRoleBindingName, Namespace: namespace}, found) + if err != nil { + t.Fatalf("Failed to update RoleBinding: (%v)", err) + } + if found.RoleRef.Name != resRoleName || + found.Subjects[0].Name != fmt.Sprintf("system:open-cluster-management:cluster:%s:addon:%s", namespace, addonName) { + t.Fatalf("rolebinding is not updated correctly") + } +} diff --git a/operators/multiclusterobservability/controllers/placementrule/status.go b/operators/multiclusterobservability/controllers/placementrule/status.go new file mode 100644 index 000000000..d0a3bd53d --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/status.go @@ -0,0 +1,71 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +var ( + statusMap = map[string]string{ + "Available": "Available", + "Progressing": "Progressing", + "Deployed": "Progressing", + "Disabled": "Degraded", + "Degraded": "Degraded", + "NotSupported": "Degraded", + } +) + +func updateAddonStatus(c client.Client, addonList mcov1beta1.ObservabilityAddonList) error { + for _, addon := range addonList.Items { + if addon.Status.Conditions == nil || len(addon.Status.Conditions) == 0 { + continue + } + conditions := []metav1.Condition{} + for _, c := range addon.Status.Conditions { + condition := metav1.Condition{ + Type: statusMap[c.Type], + Status: c.Status, + LastTransitionTime: c.LastTransitionTime, + Reason: c.Reason, + Message: c.Message, + } + conditions = append(conditions, condition) + } + managedclusteraddon := &addonv1alpha1.ManagedClusterAddOn{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: util.ManagedClusterAddonName, + Namespace: addon.ObjectMeta.Namespace, + }, managedclusteraddon) + if err != nil { + if errors.IsNotFound(err) { + log.Info("managedclusteraddon does not exist", "namespace", addon.ObjectMeta.Namespace) + continue + } + log.Error(err, "Failed to get managedclusteraddon", "namespace", addon.ObjectMeta.Namespace) + return err + } + if !reflect.DeepEqual(conditions, managedclusteraddon.Status.Conditions) { + managedclusteraddon.Status.Conditions = conditions + err = c.Status().Update(context.TODO(), managedclusteraddon) + if err != nil { + log.Error(err, "Failed to update status for managedclusteraddon", "namespace", addon.ObjectMeta.Namespace) + return err + } + log.Info("Updated status for managedclusteraddon", "namespace", addon.ObjectMeta.Namespace) + } + } + return nil +} diff --git a/operators/multiclusterobservability/controllers/placementrule/status_test.go b/operators/multiclusterobservability/controllers/placementrule/status_test.go new file mode 100644 index 000000000..4d17cbfa1 --- /dev/null +++ b/operators/multiclusterobservability/controllers/placementrule/status_test.go @@ -0,0 +1,68 @@ +// Copyright (c) 2021 Red Hat, 
Inc. +// Copyright Contributors to the Open Cluster Management project + +package placementrule + +import ( + "context" + "testing" + "time" + + mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestUpdateAddonStatus(t *testing.T) { + maddon := &addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.ManagedClusterAddonName, + Namespace: namespace, + }, + Status: addonv1alpha1.ManagedClusterAddOnStatus{}, + } + objs := []runtime.Object{maddon} + c := fake.NewFakeClient(objs...) + + addonList := &mcov1beta1.ObservabilityAddonList{ + Items: []mcov1beta1.ObservabilityAddon{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: obsAddonName, + Namespace: namespace, + }, + Status: mcov1beta1.ObservabilityAddonStatus{ + Conditions: []mcov1beta1.StatusCondition{ + { + Type: "Available", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(time.Now()), + Reason: "Deployed", + Message: "Metrics collector deployed and functional", + }, + }, + }, + }, + }, + } + + err := updateAddonStatus(c, *addonList) + if err != nil { + t.Fatalf("Failed to update status for managedclusteraddon: (%v)", err) + } + + err = c.Get(context.TODO(), types.NamespacedName{ + Name: util.ManagedClusterAddonName, + Namespace: namespace, + }, maddon) + if err != nil { + t.Fatalf("Failed to get managedclusteraddon: (%v)", err) + } + if maddon.Status.Conditions == nil || len(maddon.Status.Conditions) != 1 { + t.Fatalf("Status not updated correctly in managedclusteraddon: (%v)", maddon) + } +} diff --git a/operators/multiclusterobservability/main.go b/operators/multiclusterobservability/main.go new file mode 100644 index 000000000..4d6f82d33 --- /dev/null +++ b/operators/multiclusterobservability/main.go @@ -0,0 +1,316 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "os" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
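+ // The underscore (blank) import below is used only for its side effects: the package's init() registers the cloud auth providers with client-go; nothing from the package is referenced directly.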
+ + _ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/IBM/controller-filtered-cache/filteredcache" + ocinfrav1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlruntimescheme "sigs.k8s.io/controller-runtime/pkg/scheme" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + + imageregistryv1alpha1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/imageregistry/v1alpha1" + observabilityv1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1" + observabilityv1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoctrl "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/controllers/multiclusterobservability" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/webhook" + operatorsutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" + mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + observatoriumAPIs "github.com/stolostron/observatorium-operator/api/v1alpha1" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workv1 "open-cluster-management.io/api/work/v1" + // +kubebuilder:scaffold:imports +) + +var ( + metricsHost = "0.0.0.0" + metricsPort int32 = 8383 + + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(observabilityv1beta1.AddToScheme(scheme)) + utilruntime.Must(observabilityv1beta2.AddToScheme(scheme)) + utilruntime.Must(observatoriumAPIs.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func main() { + // var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var webhookPort int + // flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. 
"+ + "Enabling this will ensure there is only one active controller manager.") + flag.IntVar(&webhookPort, "webhook-server-port", 9443, "The listening port of the webhook server.") + opts := zap.Options{ + // enable development mode for more human-readable output, extra stack traces and logging information, etc + // disable this in final release + Development: true, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + + crdClient, err := util.GetOrCreateCRDClient() + if err != nil { + setupLog.Error(err, "Failed to create the CRD client") + os.Exit(1) + } + + // Add route Openshift scheme + if err := routev1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err := ocinfrav1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err := operatorv1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err := imageregistryv1alpha1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err := workv1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err := clusterv1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + ingressCtlCrdExists, err := util.CheckCRDExist(crdClient, config.IngressControllerCRD) + if err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + mchCrdExists, err := util.CheckCRDExist(crdClient, config.MCHCrdName) + if err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + if mchCrdExists { + if err := mchv1.SchemeBuilder.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + } + + // add scheme of storage version migration + if err := migrationv1alpha1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err := addonv1alpha1.AddToScheme(scheme); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + mcoNamespace := config.GetMCONamespace() + gvkLabelsMap := map[schema.GroupVersionKind][]filteredcache.Selector{ + corev1.SchemeGroupVersion.WithKind("Secret"): []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.GetDefaultNamespace())}, + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.OpenshiftIngressOperatorNamespace)}, + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.OpenshiftIngressNamespace)}, + }, + corev1.SchemeGroupVersion.WithKind("ConfigMap"): []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.GetDefaultNamespace())}, + }, + corev1.SchemeGroupVersion.WithKind("Service"): []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.GetDefaultNamespace())}, + }, + corev1.SchemeGroupVersion.WithKind("ServiceAccount"): []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.GetDefaultNamespace())}, + }, + appsv1.SchemeGroupVersion.WithKind("Deployment"): []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.GetDefaultNamespace())}, + }, + appsv1.SchemeGroupVersion.WithKind("StatefulSet"): []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", config.GetDefaultNamespace())}, + }, + workv1.SchemeGroupVersion.WithKind("ManifestWork"): []filteredcache.Selector{ + {LabelSelector: "owner==multicluster-observability-operator"}, + }, + clusterv1.SchemeGroupVersion.WithKind("ManagedCluster"): []filteredcache.Selector{ + {LabelSelector: 
"vendor!=auto-detect,observability!=disabled"}, + }, + } + + if ingressCtlCrdExists { + gvkLabelsMap[operatorv1.SchemeGroupVersion.WithKind("IngressController")] = []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s,metadata.name==%s", config.OpenshiftIngressOperatorNamespace, config.OpenshiftIngressOperatorCRName)}, + } + } + if mchCrdExists { + gvkLabelsMap[mchv1.SchemeGroupVersion.WithKind("MultiClusterHub")] = []filteredcache.Selector{ + {FieldSelector: fmt.Sprintf("metadata.namespace==%s", mcoNamespace)}, + } + } + + // The following RBAC resources will not be watched by MCO, the selector will not impact the mco behaviour, which means + // MCO will fetch kube-apiserver for the correspoding resource if the resource can't be found in the cache. + // Adding selector will reduce the cache size when the managedcluster scale. + gvkLabelsMap[rbacv1.SchemeGroupVersion.WithKind("ClusterRole")] = []filteredcache.Selector{ + {LabelSelector: "owner==multicluster-observability-operator"}, + } + gvkLabelsMap[rbacv1.SchemeGroupVersion.WithKind("ClusterRoleBinding")] = []filteredcache.Selector{ + {LabelSelector: "owner==multicluster-observability-operator"}, + } + gvkLabelsMap[rbacv1.SchemeGroupVersion.WithKind("Role")] = []filteredcache.Selector{ + {LabelSelector: "owner==multicluster-observability-operator"}, + } + gvkLabelsMap[rbacv1.SchemeGroupVersion.WithKind("RoleBinding")] = []filteredcache.Selector{ + {LabelSelector: "owner==multicluster-observability-operator"}, + } + + // Add filter for ManagedClusterAddOn to reduce the cache size when the managedclusters scale. + gvkLabelsMap[addonv1alpha1.SchemeGroupVersion.WithKind("ManagedClusterAddOn")] = []filteredcache.Selector{ + {LabelSelector: "owner==multicluster-observability-operator"}, + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Port: webhookPort, + Scheme: scheme, + MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "b9d51391.open-cluster-management.io", + NewCache: filteredcache.NewEnhancedFilteredCacheBuilder(gvkLabelsMap), + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = util.UpdateCRDWebhookNS(crdClient, mcoNamespace, config.MCOCrdName); err != nil { + setupLog.Error(err, "unable to update webhook service namespace in MCO CRD", "controller", "MultiClusterObservability") + } + + svmCrdExists, err := util.CheckCRDExist(crdClient, config.StorageVersionMigrationCrdName) + if err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + crdMaps := map[string]bool{ + config.MCHCrdName: mchCrdExists, + config.StorageVersionMigrationCrdName: svmCrdExists, + config.IngressControllerCRD: ingressCtlCrdExists, + } + + if err = (&mcoctrl.MultiClusterObservabilityReconciler{ + Manager: mgr, + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("MultiClusterObservability"), + Scheme: mgr.GetScheme(), + CRDMap: crdMaps, + APIReader: mgr.GetAPIReader(), + RESTMapper: mgr.GetRESTMapper(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MultiClusterObservability") + os.Exit(1) + } + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("check", healthz.Ping); err != nil { + setupLog.Error(err, "unable to 
set up ready check") + os.Exit(1) + } + if err := operatorsutil.RegisterDebugEndpoint(mgr.AddMetricsExtraHandler); err != nil { + setupLog.Error(err, "unable to set up debug handler") + os.Exit(1) + } + + // Setup Scheme for observatorium resources + schemeBuilder := &ctrlruntimescheme.Builder{ + GroupVersion: schema.GroupVersion{ + Group: "core.observatorium.io", + Version: "v1alpha1", + }, + } + schemeBuilder.Register(&observatoriumAPIs.Observatorium{}, &observatoriumAPIs.ObservatoriumList{}) + if err := schemeBuilder.AddToScheme(mgr.GetScheme()); err != nil { + setupLog.Error(err, "") + os.Exit(1) + } + + if err = (&observabilityv1beta2.MultiClusterObservability{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "MultiClusterObservability") + os.Exit(1) + } + + setupLog.Info("add webhook controller to manager") + if err := mgr.Add(webhook.NewWebhookController(mgr.GetClient(), nil, config.GetValidatingWebhookConfigurationForMCO())); err != nil { + setupLog.Error(err, "unable to add webhook controller to manager") + os.Exit(1) + } + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alert_rules.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alert_rules.yaml new file mode 100644 index 000000000..1b14228cf --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alert_rules.yaml @@ -0,0 +1,75 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: thanos-ruler-default-rules +data: + default_rules.yaml: | + groups: + - name: kubernetes-storage + rules: + - alert: KubePersistentVolumeFillingUp + annotations: + summary: PersistentVolume is filling up. + description: "The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free." + expr: kubelet_volume_stats_available_bytes{namespace="open-cluster-management-observability"}/kubelet_volume_stats_capacity_bytes{namespace="open-cluster-management-observability"} < 0.03 + for: 1m + labels: + instance: "{{ $labels.instance }}" + cluster: "{{ $labels.cluster }}" + clusterID: "{{ $labels.clusterID }}" + PersistentVolumeClaim: "{{ $labels.persistentvolumeclaim }}" + severity: critical + - alert: KubePersistentVolumeFillingUp + annotations: + summary: PersistentVolume is filling up and is predicted to run out of space in 6h. + description: "The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free." + expr: (kubelet_volume_stats_available_bytes{namespace="open-cluster-management-observability"}/kubelet_volume_stats_capacity_bytes{namespace="open-cluster-management-observability"}) < 0.15 and (predict_linear(kubelet_volume_stats_available_bytes{namespace="open-cluster-management-observability"}[6h], 4 * 24 * 3600)) <0 + for: 1h + labels: + instance: "{{ $labels.instance }}" + cluster: "{{ $labels.cluster }}" + clusterID: "{{ $labels.clusterID }}" + PersistentVolumeClaim: "{{ $labels.persistentvolumeclaim }}" + severity: warning + - name: policy-reports + rules: + - alert: ViolatedPolicyReport + annotations: + summary: "There is a policy report violation with a {{ $labels.severity }} severity level detected." 
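+ # Both the severity label and the annotations above take their value from the policyreport_info series' own severity label (preserved by the "by (...)" clause in the expr below), so the alert mirrors whatever level the policy report sets.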
+ description: "The policy: {{ $labels.policy }} has a severity of {{ $labels.severity }} on cluster: {{ $labels.cluster }}" + expr: sum(policyreport_info) by (managed_cluster_id, category, clusterID, policy, severity) > 0 + for: 1m + labels: + severity: "{{ $labels.severity }}" + - name: slo-sli-trends + rules: + - expr: sli:apiserver_request_duration_seconds:trend:1m >= bool 0.9900 + record: sli:apiserver_request_duration_seconds:bin:trend:1m + labels: + target: 0.9900 + - name: grafana-dashboard + rules: + - expr: sum(cluster:kube_pod_container_resource_requests:cpu:sum) by (cluster) / sum(kube_node_status_allocatable{resource="cpu"}) by (cluster) + record: cluster:cpu_requested:ratio + labels: + usage: grafana-dashboard + - expr: sum(cluster:kube_pod_container_resource_requests:memory:sum) by (cluster) / sum(kube_node_status_allocatable{resource="memory"}) by (cluster) + record: cluster:memory_requested:ratio + labels: + usage: grafana-dashboard + - expr: 1 - sum(:node_memory_MemAvailable_bytes:sum) by (cluster) / sum(kube_node_status_allocatable{resource="memory"}) by (cluster) + record: cluster:memory_utilized:ratio + labels: + usage: grafana-dashboard + - expr: sum(machine_cpu_cores) by (cluster) + record: cluster:cpu_cores:sum + labels: + usage: grafana-dashboard + - expr: sum(kube_node_status_allocatable{resource="cpu"}) by (cluster) + record: cluster:cpu_allocatable:sum + labels: + usage: grafana-dashboard + - expr: sum(machine_memory_bytes) by (cluster) + record: cluster:machine_memory:sum + labels: + usage: grafana-dashboard diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrole.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrole.yaml new file mode 100644 index 000000000..a967af503 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrole.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:alertmanager-accessor + labels: + alertmanager: observability +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrolebinding.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrolebinding.yaml new file mode 100644 index 000000000..df4cd6154 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:alertmanager-accessor + labels: + alertmanager: observability +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: open-cluster-management:alertmanager-accessor +subjects: +- kind: ServiceAccount + name: observability-alertmanager-accessor + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-serviceaccount.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-serviceaccount.yaml new file mode 100644 index 000000000..2dc9385dc --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-accessor-serviceaccount.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 
observability-alertmanager-accessor + namespace: open-cluster-management + labels: + alertmanager: observability diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-cabundle.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-cabundle.yaml new file mode 100644 index 000000000..9109c6c56 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-cabundle.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +data: + service-ca.crt: "" +kind: ConfigMap +metadata: + annotations: + service.beta.openshift.io/inject-cabundle: "true" + labels: + alertmanager: observability + name: alertmanager-ca-bundle + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrole.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrole.yaml new file mode 100644 index 000000000..aba7ea1a1 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrole.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:alertmanager + labels: + alertmanager: observability +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrolebinding.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrolebinding.yaml new file mode 100644 index 000000000..88a18bf10 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:alertmanager + labels: + alertmanager: observability +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: open-cluster-management:alertmanager +subjects: +- kind: ServiceAccount + name: alertmanager + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-config.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-config.yaml new file mode 100644 index 000000000..9bfabcd75 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-config.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +stringData: + alertmanager.yaml: | + "global": + "resolve_timeout": "5m" + "receivers": + - "name": "null" + "route": + "group_by": + - "namespace" + "group_interval": "5m" + "group_wait": "30s" + "receiver": "null" + "repeat_interval": "12h" + "routes": + - "match": + "alertname": "Watchdog" + "receiver": "null" +kind: Secret +metadata: + name: alertmanager-config + namespace: open-cluster-management + annotations: + skip-creation-if-exist: "true" +type: Opaque \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-operated.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-operated.yaml new file mode 100644 index 000000000..5064614dd --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-operated.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: 
alertmanager-operated + namespace: open-cluster-management +spec: + clusterIP: None + ports: + - name: web + port: 9093 + protocol: TCP + targetPort: 9093 + - name: tcp-mesh + port: 9094 + protocol: TCP + targetPort: 9094 + - name: udp-mesh + port: 9094 + protocol: UDP + targetPort: 9094 + selector: + alertmanager: observability + app: multicluster-observability-alertmanager + sessionAffinity: None + type: ClusterIP \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-proxy.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-proxy.yaml new file mode 100644 index 000000000..408fea83f --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-proxy.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +data: {} +kind: Secret +metadata: + labels: + app.kubernetes.io/name: alertmanager-proxy + name: alertmanager-proxy + namespace: open-cluster-management + annotations: + skip-creation-if-exist: "true" +type: Opaque + diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-service.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-service.yaml new file mode 100644 index 000000000..29e5e498a --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-service.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + alertmanager: observability + name: alertmanager + namespace: open-cluster-management + annotations: + service.beta.openshift.io/serving-cert-secret-name: alertmanager-tls +spec: + ports: + - name: web + port: 9093 + protocol: TCP + targetPort: web + - name: oauth-proxy + port: 9095 + protocol: TCP + targetPort: oauth-proxy + selector: + alertmanager: observability + app: multicluster-observability-alertmanager + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 + type: ClusterIP diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-serviceaccount.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-serviceaccount.yaml new file mode 100644 index 000000000..3de3467f1 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-serviceaccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alertmanager + namespace: open-cluster-management + labels: + alertmanager: observability + annotations: + serviceaccounts.openshift.io/oauth-redirectreference.alertmanager: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}' diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml new file mode 100644 index 000000000..8c96b7380 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml @@ -0,0 +1,165 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: alertmanager + namespace: open-cluster-management + labels: + app: multicluster-observability-alertmanager + alertmanager: observability +spec: + replicas: 3 + selector: + matchLabels: + app: multicluster-observability-alertmanager + alertmanager: observability + serviceName: alertmanager-operated + template: + metadata: + labels: + app: multicluster-observability-alertmanager + 
alertmanager: observability + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 70 + podAffinityTerm: + topologyKey: topology.kubernetes.io/zone + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - multicluster-observability-alertmanager + - weight: 30 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - multicluster-observability-alertmanager + containers: + - args: + - --config.file=/etc/alertmanager/config/alertmanager.yaml + - --cluster.listen-address=[$(POD_IP)]:9094 + - --storage.path=/alertmanager + - --data.retention=120h + - --web.listen-address=127.0.0.1:9093 + - --web.route-prefix=/ + env: + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + image: quay.io/stolostron/prometheus-alertmanager:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 9094 + name: mesh-tcp + protocol: TCP + - containerPort: 9093 + name: web + protocol: TCP + - containerPort: 9094 + name: mesh-udp + protocol: UDP + resources: + requests: + cpu: 4m + memory: 200Mi + volumeMounts: + - mountPath: /etc/alertmanager/config + name: config-volume + - mountPath: /alertmanager + name: alertmanager-db + - args: + - -webhook-url=http://localhost:9093/-/reload + - -volume-dir=/etc/alertmanager/config + - -volume-dir=/etc/tls/private + image: quay.io/openshift/origin-configmap-reloader:4.8.0 + imagePullPolicy: IfNotPresent + name: config-reloader + resources: + requests: + cpu: 4m + memory: 25Mi + volumeMounts: + - mountPath: /etc/alertmanager/config + name: config-volume + readOnly: true + - mountPath: /etc/tls/private + name: tls-secret + readOnly: true + - args: + - --provider=openshift + - --https-address=:9095 + - --http-address= + - --upstream=http://localhost:9093 + - --openshift-sar={"resource":"namespaces","verb":"get"} + - --openshift-delegate-urls={"/":{"resource":"namespaces","verb":"get"}} + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + # - --email-domain=* + # - --scope=user:full + # - --client-id=alertmanager + # - --client-secret=alertmanagersecret + - --openshift-service-account=alertmanager + - --cookie-secret-file=/etc/proxy/secrets/session_secret + - --skip-provider-button=true + - --openshift-ca=/etc/pki/tls/cert.pem + - --openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + image: quay.io/stolostron/origin-oauth-proxy:2.0.11-SNAPSHOT-2021-04-29-18-29-17 + imagePullPolicy: IfNotPresent + name: alertmanager-proxy + ports: + - containerPort: 9095 + name: oauth-proxy + protocol: TCP + resources: + requests: + cpu: 1m + memory: 20Mi + readinessProbe: + failureThreshold: 3 + httpGet: + path: /oauth/healthz + port: 9095 + scheme: HTTPS + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/tls/private + name: tls-secret + readOnly: true + - mountPath: /etc/proxy/secrets + name: alertmanager-proxy + serviceAccount: alertmanager + serviceAccountName: alertmanager + volumes: + - name: config-volume + secret: + defaultMode: 420 + secretName: alertmanager-config + - name: alertmanager-proxy + secret: + defaultMode: 420 + secretName: alertmanager-proxy + - name: tls-secret + secret: + defaultMode: 420 + secretName: alertmanager-tls + volumeClaimTemplates: + - metadata: + name: alertmanager-db + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 
10Gi + storageClassName: "gp2" diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml new file mode 100644 index 000000000..392b675f3 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml @@ -0,0 +1,14 @@ +resources: +- alertmanager-serviceaccount.yaml +- alertmanager-config.yaml +- alertmanager-proxy.yaml +- alertmanager-statefulset.yaml +- alertmanager-operated.yaml +- alertmanager-service.yaml +- alert_rules.yaml +- alertmanager-cabundle.yaml +- alertmanager-clusterrole.yaml +- alertmanager-clusterrolebinding.yaml +- alertmanager-accessor-clusterrole.yaml +- alertmanager-accessor-clusterrolebinding.yaml +- alertmanager-accessor-serviceaccount.yaml diff --git a/operators/multiclusterobservability/manifests/base/config/kustomization.yaml b/operators/multiclusterobservability/manifests/base/config/kustomization.yaml new file mode 100644 index 000000000..bd2cbf18e --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/config/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- metrics_allowlist.yaml diff --git a/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml b/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml new file mode 100644 index 000000000..e2a1de4af --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml @@ -0,0 +1,169 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: observability-metrics-allowlist +data: + metrics_list.yaml: | + names: + - :node_memory_MemAvailable_bytes:sum + - ALERTS + - authenticated_user_requests + - authentication_attempts + - assisted_installer_cluster_creations # counter + - assisted_installer_cluster_installation_started # counter + - assisted_installer_cluster_installation_second # histogram + - assisted_installer_cluster_host_installation_count # histogram + - assisted_installer_host_installation_phase_seconds # histogram + - assisted_installer_cluster_host_disk_sync_duration_ms # histogram + - assisted_installer_cluster_host_image_pull_status # histogram + - assisted_installer_filesystem_usage_percentage # Gauge + - cluster:capacity_cpu_cores:sum + - cluster:capacity_memory_bytes:sum + - cluster:container_cpu_usage:ratio + - cluster:container_spec_cpu_shares:ratio + - cluster:cpu_usage_cores:sum + - cluster:memory_usage:ratio + - cluster:memory_usage_bytes:sum + - cluster:usage:resources:sum + - cluster_infrastructure_provider + - cluster_version + - cluster_version_payload + - container_cpu_cfs_periods_total + - container_cpu_cfs_throttled_periods_total + - container_spec_cpu_quota + - coredns_dns_request_count_total + - coredns_dns_request_duration_seconds_sum + - coredns_dns_request_type_count_total + - coredns_dns_response_rcode_count_total + - etcd_debugging_mvcc_db_total_size_in_bytes + - etcd_mvcc_db_total_size_in_bytes + - etcd_debugging_snap_save_total_duration_seconds_sum + - etcd_disk_backend_commit_duration_seconds_bucket + - etcd_disk_backend_commit_duration_seconds_sum + - etcd_disk_wal_fsync_duration_seconds_bucket + - etcd_disk_wal_fsync_duration_seconds_sum + - etcd_object_counts + - etcd_network_client_grpc_received_bytes_total + - etcd_network_client_grpc_sent_bytes_total + - etcd_network_peer_received_bytes_total + - etcd_network_peer_sent_bytes_total + - etcd_server_client_requests_total + - etcd_server_has_leader + - 
etcd_server_health_failures + - etcd_server_leader_changes_seen_total + - etcd_server_proposals_failed_total + - etcd_server_proposals_pending + - etcd_server_proposals_committed_total + - etcd_server_proposals_applied_total + - etcd_server_quota_backend_bytes + - grpc_server_started_total + - haproxy_backend_connection_errors_total + - haproxy_backend_connections_total + - haproxy_backend_current_queue + - haproxy_backend_http_average_response_latency_milliseconds + - haproxy_backend_max_sessions + - haproxy_backend_response_errors_total + - haproxy_backend_up + - http_requests_total + - instance:node_filesystem_usage:sum + - instance:node_cpu_utilisation:rate1m + - instance:node_load1_per_cpu:ratio + - instance:node_memory_utilisation:ratio + - instance:node_network_receive_bytes_excluding_lo:rate1m + - instance:node_network_receive_drop_excluding_lo:rate1m + - instance:node_network_transmit_bytes_excluding_lo:rate1m + - instance:node_network_transmit_drop_excluding_lo:rate1m + - instance:node_num_cpu:sum + - instance:node_vmstat_pgmajfault:rate1m + - instance_device:node_disk_io_time_seconds:rate1m + - instance_device:node_disk_io_time_weighted_seconds:rate1m + - kube_daemonset_status_desired_number_scheduled + - kube_daemonset_status_number_unavailable + - kube_node_spec_unschedulable + - kube_node_status_allocatable + - kube_node_status_allocatable_cpu_cores + - kube_node_status_allocatable_memory_bytes + - kube_node_status_capacity + - kube_node_status_capacity_pods + - kube_node_status_capacity_cpu_cores + - kube_node_status_condition + - kube_pod_container_resource_limits + - kube_pod_container_resource_limits_cpu_cores + - kube_pod_container_resource_limits_memory_bytes + - kube_pod_container_resource_requests + - kube_pod_container_resource_requests_cpu_cores + - kube_pod_container_resource_requests_memory_bytes + - kube_pod_info + - kube_pod_owner + - kube_resourcequota + - kubelet_running_container_count + - kubelet_runtime_operations + - kubelet_runtime_operations_latency_microseconds + - kubelet_volume_stats_available_bytes + - kubelet_volume_stats_capacity_bytes + - kube_persistentvolume_status_phase + - machine_cpu_cores + - machine_memory_bytes + - mixin_pod_workload + - namespace:kube_pod_container_resource_requests_cpu_cores:sum + - namespace:kube_pod_container_resource_requests_memory_bytes:sum + - namespace:container_memory_usage_bytes:sum + - namespace_cpu:kube_pod_container_resource_requests:sum + - namespace_workload_pod:kube_pod_owner:relabel + - node_cpu_seconds_total + - node_filesystem_avail_bytes + - node_filesystem_free_bytes + - node_filesystem_size_bytes + - node_memory_MemAvailable_bytes + - node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + - node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate + - node_netstat_Tcp_OutSegs + - node_netstat_Tcp_RetransSegs + - node_netstat_TcpExt_TCPSynRetrans + - policyreport_info + - up + - cluster_monitoring_operator_reconcile_errors_total + - cluster_monitoring_operator_reconcile_attempts_total + - cluster_operator_conditions + - cluster_operator_up + - cluster:policy_governance_info:propagated_count + - cluster:policy_governance_info:propagated_noncompliant_count + - policy:policy_governance_info:propagated_count + - policy:policy_governance_info:propagated_noncompliant_count + matches: + - __name__="workqueue_queue_duration_seconds_bucket",job="apiserver" + - __name__="workqueue_adds_total",job="apiserver" + - __name__="workqueue_depth",job="apiserver" + - 
__name__="go_goroutines",job="apiserver" + - __name__="process_cpu_seconds_total",job="apiserver" + - __name__="process_resident_memory_bytes",job="apiserver" + - __name__="container_memory_cache",container!="" + - __name__="container_memory_rss",container!="" + - __name__="container_memory_swap",container!="" + - __name__="container_memory_working_set_bytes",container!="" + renames: + mixin_pod_workload: namespace_workload_pod:kube_pod_owner:relabel + namespace:kube_pod_container_resource_requests_cpu_cores:sum: namespace_cpu:kube_pod_container_resource_requests:sum + node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + etcd_mvcc_db_total_size_in_bytes: etcd_debugging_mvcc_db_total_size_in_bytes + rules: + - record: apiserver_request_duration_seconds:histogram_quantile_99 + expr: histogram_quantile(0.99,sum(rate(apiserver_request_duration_seconds_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (le)) + - record: apiserver_request_duration_seconds:histogram_quantile_99:instance + expr: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (le, verb, instance)) + - record: sum:apiserver_request_total:1h + expr: sum(rate(apiserver_request_total{job=\"apiserver\"}[1h])) by(code, instance) + - record: sum:apiserver_request_total:5m + expr: sum(rate(apiserver_request_total{job=\"apiserver\"}[5m])) by(code, instance) + - record: rpc_rate:grpc_server_handled_total:sum_rate + expr: sum(rate(grpc_server_handled_total{job=\"etcd\",grpc_type=\"unary\",grpc_code!=\"OK\"}[5m])) + - record: active_streams_watch:grpc_server_handled_total:sum + expr: sum(grpc_server_started_total{job=\"etcd\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"etcd\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) + - record: active_streams_lease:grpc_server_handled_total:sum + expr: sum(grpc_server_started_total{job=\"etcd\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"etcd\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) + - record: cluster:kube_pod_container_resource_requests:cpu:sum + expr: sum(sum(sum(kube_pod_container_resource_requests{resource=\"cpu\"}) by (pod,namespace,container) * on(pod,namespace) group_left(phase) max(kube_pod_status_phase{phase=~\"Running|Pending|Unknown\"} >0) by (pod,namespace,phase)) by (pod,namespace,phase)) + - record: cluster:kube_pod_container_resource_requests:memory:sum + expr: sum(sum(sum(kube_pod_container_resource_requests{resource=\"memory\"}) by (pod,namespace,container) * on(pod,namespace) group_left(phase) max(kube_pod_status_phase{phase=~\"Running|Pending|Unknown\"} >0) by (pod,namespace,phase)) by (pod,namespace,phase)) + - record: sli:apiserver_request_duration_seconds:trend:1m + expr: sum(increase(apiserver_request_duration_seconds_bucket{job=\"apiserver\",service=\"kubernetes\",le=\"1\",verb=~\"POST|PUT|DELETE|PATCH\"}[1m])) / sum(increase(apiserver_request_duration_seconds_count{job=\"apiserver\",service=\"kubernetes\",verb=~\"POST|PUT|DELETE|PATCH\"}[1m])) diff --git a/operators/multiclusterobservability/manifests/base/grafana/cluster-role-binding.yaml b/operators/multiclusterobservability/manifests/base/grafana/cluster-role-binding.yaml new file mode 100644 index 000000000..2051aa374 --- /dev/null +++ 
b/operators/multiclusterobservability/manifests/base/grafana/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:grafana +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: open-cluster-management:grafana +subjects: +- kind: ServiceAccount + name: grafana + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/manifests/base/grafana/cluster-role.yaml b/operators/multiclusterobservability/manifests/base/grafana/cluster-role.yaml new file mode 100644 index 000000000..4e4076402 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/cluster-role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:grafana +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/grafana/config.yaml b/operators/multiclusterobservability/manifests/base/grafana/config.yaml new file mode 100644 index 000000000..e1fe876db --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + grafana.ini: W2F1dGhdCmRpc2FibGVfbG9naW5fZm9ybSA9IHRydWUKZGlzYWJsZV9zaWdub3V0X21lbnUgPSB0cnVlClthdXRoLmJhc2ljXQplbmFibGVkID0gZmFsc2UKW2F1dGgucHJveHldCmF1dG9fc2lnbl91cCA9IHRydWUKZW5hYmxlZCA9IHRydWUKaGVhZGVyX25hbWUgPSBYLUZvcndhcmRlZC1Vc2VyCltwYXRoc10KZGF0YSA9IC92YXIvbGliL2dyYWZhbmEKbG9ncyA9IC92YXIvbGliL2dyYWZhbmEvbG9ncwpwbHVnaW5zID0gL3Zhci9saWIvZ3JhZmFuYS9wbHVnaW5zCnByb3Zpc2lvbmluZyA9IC9ldGMvZ3JhZmFuYS9wcm92aXNpb25pbmcKW3NlY3VyaXR5XQphZG1pbl91c2VyID0gV0hBVF9ZT1VfQVJFX0RPSU5HX0lTX1ZPSURJTkdfU1VQUE9SVF8wMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwCmNvb2tpZV9zZWN1cmUgPSB0cnVlCltzZXJ2ZXJdCmh0dHBfcG9ydCA9IDMwMDEKcm9vdF91cmwgPSAlKHByb3RvY29sKXM6Ly8lKGRvbWFpbilzL2dyYWZhbmEvCmRvbWFpbiA9IGxvY2FsaG9zdApbdXNlcnNdCnZpZXdlcnNfY2FuX2VkaXQgPSB0cnVlCltkYXRhcHJveHldCnRpbWVvdXQgPSAzMDAKZGlhbF90aW1lb3V0ID0gMzAKa2VlcF9hbGl2ZV9zZWNvbmRzID0gMzAwCg== +kind: Secret +metadata: + name: grafana-config + namespace: open-cluster-management +type: Opaque diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml new file mode 100644 index 000000000..5ad8a2dfb --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml @@ -0,0 +1,1986 @@ +apiVersion: v1 +data: + acm-clusters-overview-ocp311.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "iteration": 1626109404690, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 138, + "panels": [], + "title": "Control Plane Health", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": 
[ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Max latency (99th percentile)" + }, + { + "id": "unit", + "value": "s" + }, + { + "id": "custom.displayMode", + "value": "color-background" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 1 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 2 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "API Errors [1h]" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.displayMode", + "value": "color-text" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 1 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 2 + } + ] + } + }, + { + "id": "noValue", + "value": "0" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "API servers up" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "custom.displayMode", + "value": "color-text" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(245, 54, 54, 0.9)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0 + }, + { + "color": "rgba(50, 172, 45, 0.97)", + "value": 1 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/09ec8aa1e996d6ffcd6817bbaff4db1b/kubernetes-api-server?var-cluster=${__data.fields.cluster}&var-instance=All" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 146, + "interval": "4m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "max(apiserver_request_duration_seconds:histogram_quantile_99{clusterType=\"ocp3\"}) by (cluster)", + "format": "table", + "instant": true, + "refId": "A" + }, + { + "expr": "(sum(up{clusterType=\"ocp3\",job=\"apiserver\"} == 1) by (cluster) / count(up{clusterType=\"ocp3\",job=\"apiserver\"}) by (cluster))", + "format": "table", + "instant": true, + "refId": "C" + }, + { + "expr": "sum by (cluster)(sum:apiserver_request_total:1h{clusterType=\"ocp3\",code=~\"5..\"})", + "format": "table", + "instant": true, + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "API Server", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #A", + "Value #C", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "datasource": null, + "description": "Leader 
election changes per cluster over the time range selected for dashboard.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to cluster", + "url": "/d/N8BxQ2jMz/kubernetes-etcd-cluster?var-cluster=${__data.fields.cluster}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Leader Election Changes" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "red", + "value": 2 + } + ] + } + }, + { + "id": "custom.displayMode", + "value": "color-text" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "DB Size" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Has a Leader" + }, + { + "id": "mappings", + "value": [ + { + "from": "", + "id": 1, + "text": "Yes", + "to": "", + "type": 1, + "value": "1" + }, + { + "from": "", + "id": 2, + "text": "No", + "to": "", + "type": 1, + "value": "0" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 150, + "interval": "1m", + "options": { + "frameIndex": 2, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "sum(changes(etcd_server_leader_changes_seen_total{clusterType=\"ocp3\",job=\"etcd\"}[$__range])) by (cluster)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + }, + { + "expr": "max(etcd_debugging_mvcc_db_total_size_in_bytes{clusterType=\"ocp3\",job=\"etcd\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + }, + { + "expr": "max(etcd_server_has_leader{clusterType=\"ocp3\",job=\"etcd\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "C" + } + ], + "title": "etcd", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #B", + "Value #A", + "Value #C" + ] + } + } + }, + { + "id": "merge", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "Value #A": 2, + "Value #B": 3, + "Value #C": 1, + "cluster": 0 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 140, + "panels": [], + "title": "Optimization", + "type": "row" + }, + { + "datasource": "$datasource", + "description": "Highlights % differences between CPU requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": null + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Overestimation" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.displayMode", + "value": "color-background" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.2 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Cluster" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 151, + "interval": "4m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "(sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)) - (1 - avg(rate(node_cpu_seconds_total{clusterType=\"ocp3\",mode=\"idle\"}[$__rate_interval])) by (cluster))", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C" + }, + { + "expr": "sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "1 - avg(rate(node_cpu_seconds_total{clusterType=\"ocp3\",mode=\"idle\"}[$__rate_interval])) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU", + "transformations": [ + { + 
"id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #C", + "Value #A", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "datasource": "$datasource", + "description": "Highlights % differences between Memory requests commitments vs utilization. When this difference is large ( >20%), it means that resources are reserved but unused.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": null + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Overestimation" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.displayMode", + "value": "color-background" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.2 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Cluster" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 153, + "interval": "4m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "(sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)) - (1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster))", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C" + }, + { + "expr": "sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": 
true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #C", + "Value #A", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 34, + "panels": [], + "repeat": null, + "title": "Capacity / Utilization", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Total Cores" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Allocatable Cores" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Cluster" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 47, + "interval": "4m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "sum(machine_cpu_cores{clusterType=\"ocp3\"}) by (cluster)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "F" + }, + { + "expr": "sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + }, + { + "expr": "sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / 
sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "1 - avg(rate(node_cpu_seconds_total{clusterType=\"ocp3\",mode=\"idle\"}[$__rate_interval])) by (cluster)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "G" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "cluster" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Time 1": false + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Usage %", + "Value #C": "Disk", + "Value #D": "Memory", + "Value #E": "Network", + "Value #F": "Total Cores", + "Value #G": "Requests %", + "cluster": "Cluster" + } + } + }, + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #F", + "Value #B", + "Value #A", + "Value #G" + ] + } + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 64, + "interval": "4m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(5, (1 - avg(rate(node_cpu_seconds_total{clusterType=\"ocp3\",mode=\"idle\"}[$__rate_interval])) by (cluster)))", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{cluster}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top 5 Utilized Clusters (% CPU usage)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": 1 + } + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Available Memory" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + 
"matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Cluster" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 60, + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "sum(machine_memory_bytes{clusterType=\"ocp3\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "F" + }, + { + "expr": "sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + }, + { + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "G" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "cluster" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Usage %", + "Value #C": "Disk", + "Value #D": "Memory", + "Value #E": "Network", + "Value #F": "Total Memory", + "Value #G": "Requests %", + "cluster": "Cluster" + } + } + }, + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #F", + "Value #A", + "Value #G" + ] + } + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 25 + }, + "hiddenSeries": false, + "id": 65, + "interval": "4m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": 
[ + {} + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(5, (1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top 5 Utilized Clusters (% Memory usage)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Received" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Transmitted" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to cluster", + "url": "/d/ff635a025bcfea7bc3dd4f508990a3e9/kubernetes-networking-cluster?var-cluster=${__data.fields.cluster}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Transmitted Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Received Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 148, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Current Bandwidth Received" + } + ] + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{clusterType=\"ocp3\",job=\"node-exporter\"}) by (cluster)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + }, + { + "expr": "sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{clusterType=\"ocp3\",job=\"node-exporter\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", 
+ "refId": "B" + }, + { + "expr": "sum(instance:node_network_receive_drop_excluding_lo:rate1m{clusterType=\"ocp3\",job=\"node-exporter\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "C" + }, + { + "expr": "sum(instance:node_network_transmit_drop_excluding_lo:rate1m{clusterType=\"ocp3\",job=\"node-exporter\"}) by (cluster)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "D" + } + ], + "title": "Bandwidth Utilization", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "Value #A", + "Value #B", + "Value #C", + "Value #D" + ] + } + } + }, + { + "id": "merge", + "options": {} + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Value #A" + } + ] + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 26, + "style": "light", + "tags": [ + "ACM" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "10m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "ACM - Clusters Overview - OCP 3.11", + "uid": "40dacc6d-40ad-44ef-a459-67c2f5819cef", + "version": 5 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-acm-clusters-overview-ocp311 + namespace: open-cluster-management-observability + labels: + general-folder: 'true' + diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml new file mode 100644 index 000000000..142854c94 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml @@ -0,0 +1,1806 @@ +apiVersion: v1 +data: + acm-clusters-overview.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "iteration": 1626109404690, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 138, + "panels": [], + "title": "Control Plane Health", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "align": null, + "filterable": false + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "mappings": [], + "color": { + "mode": "thresholds" + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Max latency (99th percentile)" + }, + { + "id": "unit", + "value": "s" + }, + { + "id": "custom.displayMode", + "value": "color-background" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": 
"thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 1 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 2 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "API Errors [1h]" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.displayMode", + "value": "color-text" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 1 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 2 + } + ] + } + }, + { + "id": "noValue", + "value": "0" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "api_up" + }, + "properties": [ + { + "id": "displayName", + "value": "API servers up" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "custom.displayMode", + "value": "color-text" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(245, 54, 54, 0.9)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0 + }, + { + "color": "rgba(50, 172, 45, 0.97)", + "value": 1 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/09ec8aa1e996d6ffcd6817bbaff4db1b/kubernetes-api-server?var-cluster=${__data.fields.cluster}&var-instance=All" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 146, + "interval": "4m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.5.5", + "targets": [ + { + "exemplar": true, + "expr": "topk(50, max(apiserver_request_duration_seconds:histogram_quantile_99) by (cluster))\n* on(cluster) group_left(api_up) count_values without() (\"api_up\", (sum(up{job=\"apiserver\"} == 1) by (cluster) / count(up{job=\"apiserver\"}) by (cluster)))", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum by (cluster)(sum:apiserver_request_total:1h{code=~\"5..\"})", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "title": "Top 50 Max Latency API Server", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": { + "Time": 0, + "Value #A": 2, + "Value #B": 4, + "api_up": 3, + "cluster": 1 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "description": "Leader election changes per cluster over the time range selected for dashboard.", + "fieldConfig": { + "defaults": { + "custom": { + "align": null, + "filterable": false + }, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "mappings": [], + "color": { + "mode": "thresholds" + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + 
{ + "id": "displayName", + "value": "Cluster" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to cluster", + "url": "/d/N8BxQ2jMz/kubernetes-etcd-cluster?var-cluster=${__data.fields.cluster}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "displayName", + "value": "Leader Election Changes" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "red", + "value": 2 + } + ] + } + }, + { + "id": "custom.displayMode", + "value": "color-text" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "db_size" + }, + "properties": [ + { + "id": "displayName", + "value": "DB Size" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "has_leader" + }, + "properties": [ + { + "id": "displayName", + "value": "Has a Leader" + }, + { + "id": "mappings", + "value": [ + { + "from": "", + "id": 1, + "text": "Yes", + "to": "", + "type": 1, + "value": "1" + }, + { + "from": "", + "id": 2, + "text": "No", + "to": "", + "type": 1, + "value": "0" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 150, + "interval": "1m", + "options": { + "showHeader": true, + "frameIndex": 2, + "sortBy": [] + }, + "pluginVersion": "7.5.5", + "targets": [ + { + "expr": "sum(changes(etcd_server_leader_changes_seen_total{job=\"etcd\"}[$__range])) by (cluster)\n* on(cluster) group_left(db_size) count_values without() (\"db_size\", max(etcd_debugging_mvcc_db_total_size_in_bytes{job=\"etcd\"}) by (cluster))\n* on(cluster) group_left(has_leader) count_values without() (\"has_leader\", max(etcd_server_has_leader{job=\"etcd\"}) by (cluster))", + "legendFormat": "", + "interval": "", + "exemplar": true, + "format": "table", + "instant": true, + "refId": "A" + } + ], + "title": "etcd", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "db_size", + "has_leader", + "Value" + ] + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "cluster": 0, + "has_leader": 1, + "Value": 2, + "db_size": 3 + }, + "renameByName": {} + } + } + ], + "type": "table", + "datasource": null + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 140, + "panels": [], + "title": "Optimization", + "type": "row" + }, + { + "datasource": "$datasource", + "description": "Highlights % differences between CPU requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "displayName", + "value": "Overestimation" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.displayMode", + "value": "color-background" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.2 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cpu_requested" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cpu_utilized" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Cluster" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 151, + "interval": "5m", + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "exemplar": true, + "expr": "topk(50, cluster:cpu_requested:ratio - ignoring(usage) (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\"}[$__rate_interval])) by (cluster)))\n* on(cluster) group_left(cpu_requested) count_values without() (\"cpu_requested\", cluster:cpu_requested:ratio)\n* on(cluster) group_left(cpu_utilized) count_values without() (\"cpu_utilized\", (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\"}[$__rate_interval])) by (cluster)))", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Top 50 CPU Overestimation Clusters", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": { + "Time": 0, + "Value": 2, + "cluster": 1, + "cpu_requested": 3, + "cpu_utilized": 4 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": "$datasource", + "description": "Highlights % differences between Memory requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Cluster" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "displayName", + "value": "Overestimation" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.displayMode", + "value": "color-background" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0.2 + } + ] + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory_requested" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory_utilized" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 153, + "interval": "5m", + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "exemplar": true, + "expr": "topk(50, cluster:memory_requested:ratio - ignoring(usage) cluster:memory_utilized:ratio)\n* on(cluster) group_left(memory_requested) count_values without() (\"memory_requested\", cluster:memory_requested:ratio)\n* on(cluster) group_left(memory_utilized) count_values without() (\"memory_utilized\", cluster:memory_utilized:ratio)", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Top 50 Memory Overestimation Clusters", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": { + "Time": 0, + "Value": 2, + "cluster": 1, + "memory_requested": 3, + "memory_utilized": 4 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 34, + "panels": [], + "repeat": null, + "title": "Capacity / Utilization", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + 
"filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "machine_cpu_cores_sum" + }, + "properties": [ + { + "id": "displayName", + "value": "Total Cores" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "node_allocatable_cpu_cores_sum" + }, + "properties": [ + { + "id": "displayName", + "value": "Allocatable Cores" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cpu_requested" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 47, + "interval": "5m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "exemplar": true, + "expr": "topk(50, (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\"}[$__rate_interval])) by (cluster)))\n* on(cluster) group_left(machine_cpu_cores_sum) count_values without() (\"machine_cpu_cores_sum\", cluster:cpu_cores:sum)\n* on(cluster) group_left(node_allocatable_cpu_cores_sum) count_values without() (\"node_allocatable_cpu_cores_sum\", cluster:cpu_allocatable:sum)\n* on(cluster) group_left(cpu_requested) count_values without() (\"cpu_requested\", cluster:cpu_requested:ratio)", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Top 50 CPU Utilized Clusters", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "usage": true + }, + "indexByName": { + "Time": 0, + "Value": 5, + "cluster": 1, + "cpu_requested": 4, + "machine_cpu_cores_sum": 2, + "node_allocatable_cpu_cores_sum": 3 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 64, + "interval": "4m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + 
"values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(5, (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\",clusterType!=\"ocp3\"}[$__rate_interval])) by (cluster)))", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{cluster}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top 5 Utilized Clusters (% CPU usage)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": 1 + } + }, + { + "datasource": "$datasource", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to cluster", + "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "machine_memory_sum" + }, + "properties": [ + { + "id": "displayName", + "value": "Available Memory" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "machine_memory_requested" + }, + "properties": [ + { + "id": "displayName", + "value": "Requested" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "displayName", + "value": "Utilized" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 60, + "interval": "5m", + "options": { + "showHeader": true + }, + "pluginVersion": "7.3.10", + "targets": [ + { + "exemplar": true, + "expr": "topk(50, cluster:memory_utilized:ratio)\n* on(cluster) group_left(machine_memory_sum) count_values without() (\"machine_memory_sum\", cluster:machine_memory:sum)\n* on(cluster) group_left(machine_memory_requested) count_values without() (\"machine_memory_requested\", cluster:memory_requested:ratio)", + "instant": true, + "interval": "", + 
"legendFormat": "", + "refId": "A" + } + ], + "title": "Top 50 Memory Utilized Clusters", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "usage": true + }, + "indexByName": { + "Time": 0, + "Value": 4, + "cluster": 1, + "machine_memory_requested": 3, + "machine_memory_sum": 2 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 25 + }, + "hiddenSeries": false, + "id": 65, + "interval": "4m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.10", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + {} + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "topk(5, (1 - sum(:node_memory_MemAvailable_bytes:sum) by (cluster) / sum(kube_node_status_allocatable{resource=\"memory\"}) by (cluster)))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top 5 Utilized Clusters (% Memory usage)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "fieldConfig": { + "defaults": { + "custom": { + "align": null, + "filterable": false + }, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "mappings": [], + "color": { + "mode": "thresholds" + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Received" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "node_transmit" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Transmitted" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "displayName", + "value": "Cluster" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to cluster", + "url": "/d/ff635a025bcfea7bc3dd4f508990a3e9/kubernetes-networking-cluster?var-cluster=${__data.fields.cluster}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + 
"options": "node_transmit_drop" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Transmitted Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "node_receive_drop" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Received Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 148, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Current Bandwidth Received" + } + ] + }, + "pluginVersion": "7.5.5", + "targets": [ + { + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster)\n* on(cluster) group_left(node_transmit) count_values without() (\"node_transmit\", sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))\n* on(cluster) group_left(node_receive_drop) count_values without() (\"node_receive_drop\", sum(instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))\n* on(cluster) group_left(node_transmit_drop) count_values without() (\"node_transmit_drop\", sum(instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))", + "legendFormat": "", + "interval": "", + "exemplar": true, + "format": "table", + "instant": true, + "refId": "A" + } + ], + "title": "Bandwidth Utilization", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "cluster", + "node_receive_drop", + "node_transmit", + "node_transmit_drop", + "Value" + ] + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "Value #A" + } + ] + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "cluster": 0, + "Value": 1, + "node_transmit": 2, + "node_receive_drop": 3, + "node_transmit_drop": 4 + }, + "renameByName": {} + } + } + ], + "type": "table", + "description": "", + "datasource": null + } + ], + "refresh": "5m", + "schemaVersion": 26, + "style": "light", + "tags": [ + "ACM" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "10m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "ACM - Clusters Overview", + "uid": "2b679d600f3b9e7676a7c5ac3643d448", + "version": 5 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-acm-clusters-overview + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml new file mode 100644 index 000000000..8b97e43e1 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml @@ -0,0 +1,1625 @@ +apiVersion: v1 +data: + acm-optimization-overview.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana 
--", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 2, + "iteration": 1621618422811, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "title": "CPU", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "#F2495C", + "value": 0.2 + }, + { + "color": "#F2495C" + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 8, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "(sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})) - (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\",cluster=\"$cluster\"}[$__rate_interval])))", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overestimation", + "description": "Highlights % differences between CPU requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 8, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 20, + "x": 4, + "y": 1 + }, + "hiddenSeries": false, + "id": 16, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "interval": "", + "legendFormat": "{{namespace}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 5 + }, + "id": 6, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Requests Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { 
+ "h": 4, + "w": 4, + "x": 0, + "y": 9 + }, + "id": 4, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[$__rate_interval]))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "type": "stat" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Pods" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "namespace" + }, + "properties": [ + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "custom.width", + "value": 350 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Usage" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Pods" + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 
0, + "y": 13 + }, + "id": 26, + "interval": "4m", + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "namespace", + "Value #E", + "Value #C", + "Value #D", + "Value #B" + ] + } + } + }, + { + "id": "seriesToColumns", + "options": { + "byField": "namespace" + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 18, + "panels": [], + "title": "Memory", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "#F2495C", + "value": 0.2 + }, + { + "color": "#F2495C" + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 23 + }, + "id": 14, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "(sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"})/ sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"}))\n-\n(1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"})/ sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"}))", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overestimation", + "description": "Highlights % differences between Memory requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 8, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 20, + "x": 4, + "y": 23 + }, + "hiddenSeries": false, + "id": 22, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{namespace}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 27 + }, + "id": 12, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Requests Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + 
"value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 31 + }, + "id": 10, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "type": "stat" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Pods" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "namespace" + }, + "properties": [ + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "custom.width", + "value": 350 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Pods" + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to pods", + "url": 
"/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 28, + "interval": "4m", + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Requests by Namespace", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "namespace" + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "namespace", + "Value #E", + "Value #C", + "Value #D", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 46 + }, + "id": 30, + "panels": [], + "title": "Networking", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Received" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Transmitted" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Received Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Transmitted Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": 
"namespace" + }, + "properties": [ + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "d/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&refresh=1m&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": 300 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 47 + }, + "id": 34, + "minSpan": 24, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(instance:node_network_receive_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(instance:node_network_transmit_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #G", + "Value #H", + "instance" + ] + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "ACM" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "ACM - Resource Optimization / Cluster", + "uid": "8Qvi3edMz", + "version": 3 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-acm-optimization-overview + namespace: 
open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml new file mode 100644 index 000000000..f2eb93d78 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml @@ -0,0 +1,1066 @@ +apiVersion: v1 +data: + cluster-rsrc-use.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "http://www.brendangregg.com/USEmethod/use-linux.html", + "editable": true, + "gnetId": 12135, + "graphTooltip": 0, + "id": 24, + "iteration": 1601607511586, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "repeat": null, + "title": "CPU", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(\n instance:node_cpu_utilisation:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}\n*\n instance:node_num_cpu:sum{cluster=\"$cluster\",job=\"node-exporter\"}\n)\n/ scalar(sum(instance:node_num_cpu:sum{cluster=\"$cluster\",job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": 
[ + { + "expr": "instance:node_load1_per_cpu:ratio{cluster=\"$cluster\",job=\"node-exporter\"}\n/ scalar(count(instance:node_load1_per_cpu:ratio{cluster=\"$cluster\",job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Saturation (load1 per CPU)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 11, + "panels": [], + "repeat": null, + "title": "Memory", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_memory_utilisation:ratio{cluster=\"$cluster\",job=\"node-exporter\"}\n/ scalar(count(instance:node_memory_utilisation:ratio{cluster=\"$cluster\",job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 10 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": 
false, + "targets": [ + { + "expr": "instance:node_vmstat_pgmajfault:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Saturation (Major Page Faults)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "rps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 12, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/ Receive/", + "stack": "A" + }, + { + "alias": "/ Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Receive", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + }, + { + "expr": "instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Transmit", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Net Utilisation (Bytes Receive/Transmit)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 20 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + 
"values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/ Receive/", + "stack": "A" + }, + { + "alias": "/ Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_drop_excluding_lo:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Receive", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + }, + { + "expr": "instance:node_network_transmit_drop_excluding_lo:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} Transmit", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Net Saturation (Drops Receive/Transmit)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "rps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 13, + "panels": [], + "repeat": null, + "title": "Disk IO", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 30 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_seconds:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_seconds:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} {{device}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + 
"yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 30 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_weighted_seconds:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate1m{cluster=\"$cluster\",job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} {{device}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk IO Saturation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 14, + "panels": [], + "repeat": null, + "title": "Disk Space", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 41 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum without (device) (\n max without (fstype, mountpoint) (\n node_filesystem_size_bytes{cluster=\"$cluster\",job=\"node-exporter\", fstype!=\"\"} - node_filesystem_avail_bytes{cluster=\"$cluster\",job=\"node-exporter\", fstype!=\"\"}\n )\n) \n/ scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{cluster=\"$cluster\",job=\"node-exporter\", fstype!=\"\"})))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Space Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + "schemaVersion": 21, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "", + "value": "" + }, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "tags": [], + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(cluster)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + { + "selected": true, + "text": "", + "value": "" + }, + { + "selected": false, + "text": "", + "value": "" + } + ], + "query": "label_values(cluster)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "USE Method / Cluster", + "uid": "3e97d1d02672cdd0861f4c97c64f89b2", + "version": 6 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-cluster-rsrc-use + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-apiserver.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-apiserver.yaml new file mode 100644 index 000000000..eba8ad20d --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-apiserver.yaml @@ -0,0 +1,1110 @@ +apiVersion: v1 +data: + k8s-apiserver.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "kubernetes api dashbord", + "editable": true, + "gnetId": 12116, + "graphTooltip": 0, + "id": 4, + "iteration": 1631050614206, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 0, + "y": 0 + }, + "id": 2, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + 
"targets": [ + { + "expr": "(sum(up{job=\"apiserver\",cluster=\"$cluster\"} == 1) / count(up{job=\"apiserver\",cluster=\"$cluster\"}))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "title": "API Servers Up", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 10, + "x": 4, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "interval": "4m", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + {} + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "apiserver_request_duration_seconds:histogram_quantile_99:instance{instance=~\"$instance\", cluster=\"$cluster\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{verb}}", + "refId": "A" + }, + { + "expr": "1", + "legendFormat": "Latency Threshold", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Latency (99th percentile)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [], + "thresholds": { + "mode": "absolute", + "steps": [] + } + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 10, + "x": 14, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "interval": "4m", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(sum:apiserver_request_total:5m{instance=~\"$instance\",code=~\"2..\", cluster=\"$cluster\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "2xx", + "refId": "A" + }, + { + "expr": "sum(sum:apiserver_request_total:5m{instance=~\"$instance\",code=~\"3..\", cluster=\"$cluster\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "3xx", + "refId": 
"B" + }, + { + "expr": "sum(sum:apiserver_request_total:5m{instance=~\"$instance\",code=~\"4..\", cluster=\"$cluster\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "4xx", + "refId": "C" + }, + { + "expr": "sum(sum:apiserver_request_total:5m{instance=~\"$instance\",code=~\"5..\", cluster=\"$cluster\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "5xx", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Rate by HTTP Return Code", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "The time it takes to fulfill the different actions to keep the desired status of the cluster.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "interval": "4m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"apiserver\", instance=~\"$instance\", cluster=\"$cluster\"}[$__rate_interval])) by (instance, name, le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Work Queue Latency by Requestor", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 15, + "panels": [], + "title": "Saturation", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Number of actions waiting in the queue to be performed.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, 
+ "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 6, + "interval": "", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(workqueue_depth{job=\"apiserver\", instance=~\"$instance\", cluster=\"$cluster\"}[$__rate_interval])) by (instance, name)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Queue Depth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How fast we are scheduling new actions to be performed by controller.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 17 + }, + "hiddenSeries": false, + "id": 5, + "interval": "4m", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(workqueue_adds_total{job=\"apiserver\" ,instance=~\"$instance\", cluster=\"$cluster\"}[$__rate_interval])) by (instance, name)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}} {{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Queue Add Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 24 + }, + "hiddenSeries": false, + "id": 11, + "interval": "", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"apiserver\",instance=~\"$instance\", cluster=\"$cluster\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 24 + }, + "hiddenSeries": false, + "id": 12, + "interval": "4m", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(process_cpu_seconds_total{job=\"apiserver\",instance=~\"$instance\", cluster=\"$cluster\"}[$__rate_interval])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + 
"custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 24 + }, + "hiddenSeries": false, + "id": 13, + "interval": "", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{job=\"apiserver\",instance=~\"$instance\", cluster=\"$cluster\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Goroutines", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": "node", + "multi": false, + "name": "instance", + "options": [], + "query": { + "query": "label_values(process_resident_memory_bytes{cluster=\"$cluster\"}, instance)", + "refId": "Observatorium-instance-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + 
"title": "Kubernetes / API server", + "uid": "09ec8aa1e996d6ffcd6817bbaff4db1b", + "version": 44 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-apiserver + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml new file mode 100644 index 000000000..560a22525 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml @@ -0,0 +1,2124 @@ +apiVersion: v1 +data: + k8s-compute-resources-cluster.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Prometheus-operator dashboard", + "editable": true, + "gnetId": 12114, + "graphTooltip": 0, + "id": 3, + "iteration": 1621617333424, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 20, + "panels": [], + "repeat": null, + "title": "Headlines", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 1, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[$__rate_interval]))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 1 + }, + "id": 2, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", 
+ "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Requests Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 1 + }, + "id": 3, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Limits Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 1 + }, + "id": 4, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + 
"color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 1 + }, + "id": 5, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Requests Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 1 + }, + "id": 6, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Limits Commitment", + "type": "stat" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 21, + "panels": [], + "repeat": null, + "title": "CPU", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 5 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": 
"time_series", + "intervalFactor": 2, + "legendFormat": "{{namespace}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 22, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Usage" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "namespace" + }, + "properties": [ + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": 400 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Pods" + }, + { + 
"id": "links", + "value": [ + { + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "decimals" + }, + { + "id": "custom.width", + "value": 100 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "custom.width", + "value": 100 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "decimals" + }, + { + "id": "displayName", + "value": "Workloads" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to workloads", + "url": "/d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 8, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "count(avg(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "namespace", + "Value #A", + "Value #C", + "Value #D", + "Value #E", + "Value #F", + "Value #G", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 
1, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 23, + "panels": [], + "repeat": null, + "title": "Memory", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 28 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{namespace}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 24, + "panels": [], + "repeat": null, + "title": "Memory Requests", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + 
{ + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "namespace" + }, + "properties": [ + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "custom.width", + "value": 400 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "custom.width", + "value": 100 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "decimals" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to pods", + "url": "/d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "displayName", + "value": "Pods" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "custom.width", + "value": 100 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "decimals" + }, + { + "id": "links", + "value": [ + { + "title": "Drill down to Workloads", + "url": "/d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "displayName", + "value": "Workloads" + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 10, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "count(avg(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 
10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Requests by Namespace", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "namespace", + "Value #A", + "Value #C", + "Value #D", + "Value #E", + "Value #F", + "Value #G", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 25, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Received" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Transmitted" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Received Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Transmitted Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "namespace" + }, + "properties": [ + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "d/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&refresh=1m&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": 300 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 49 + }, + "id": 32, + "minSpan": 24, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + 
"expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(instance:node_network_receive_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(instance:node_network_transmit_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #G", + "Value #H", + "instance" + ] + } + } + } + ], + "type": "table" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 28, + "interval": "1m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:58", + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:59", + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + 
"mode": "absolute", + "steps": [] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 58 + }, + "hiddenSeries": false, + "id": 30, + "interval": "1m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:383", + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:384", + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes / Compute Resources / Cluster", + "uid": "efa86fd1d0c121a26444b636a3f509a8", + "version": 4 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-compute-resources-cluster + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml 
b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml new file mode 100644 index 000000000..3237fb2d6 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml @@ -0,0 +1,1478 @@ +apiVersion: v1 +data: + k8s-compute-resources-namespace-pods.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12117, + "graphTooltip": 0, + "id": 6, + "iteration": 1621393773054, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 16, + "panels": [], + "repeat": null, + "title": "Headlines", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "No Data", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 1, + "interval": "", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from requests)", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "No Data", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 2, + "interval": "", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", 
namespace=\"$namespace\"}) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from limits)", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "No Data", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 3, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) / sum(kube_pod_container_resource_requests{namespace=\"$namespace\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilization (from requests)", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "rgb(255, 255, 255)", + "mode": "fixed" + }, + "custom": {}, + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "No Data", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 4, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) / sum(kube_pod_container_resource_limits{namespace=\"$namespace\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation (from limits)", + "type": "stat" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 17, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 5 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 18, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Usage" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + 
"value": "CPU Requests" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "pod" + }, + "properties": [ + { + "id": "displayName", + "value": "Pod" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down", + "url": "/d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=${__data.fields.pod}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 6, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": 
"filterFieldsByName", + "options": { + "include": { + "names": [ + "pod", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E" + ] + } + } + }, + { + "id": "merge", + "options": {} + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 19, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 7, + "interval": null, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 20, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (RSS)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Cache)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Swap)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "pod" + }, + "properties": [ + { + "id": "displayName", + "value": "Pod" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down", + "url": "/d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=${__data.fields.pod}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 8, + "interval": "4m", + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": 
"sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests{namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits{namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "pod", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E", + "Value #F", + "Value #G", + "Value #H" + ] + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": ["kubernetes-mixin"], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": 
null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refId": "Observatorium-namespace-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["1m", "5m", "15m", "30m", "1h", "2h", "1d"], + "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + }, + "timezone": "browser", + "title": "Kubernetes / Compute Resources / Namespace (Pods)", + "uid": "85a562078cdf77779eaa1add43ccec1e", + "version": 2 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-compute-resources-namespace-pods + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml new file mode 100644 index 000000000..e69e0a4d7 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml @@ -0,0 +1,1019 @@ +apiVersion: v1 +data: + k8s-compute-resources-namespace-workloads.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12118, + "graphTooltip": 0, + "id": 5, + "iteration": 1621618561596, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 14, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", 
namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{workload}} - {{workload_type}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "quota - requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "quota - limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 15, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "align": "left", + "filterable": false + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Workload" + }, + "properties": [ + { + "id": "custom.align", + "value": "left" + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to workload", + "url": "/d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=${__data.fields[workload]}&var-type=$type" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "CPU Requests %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "CPU Limits %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Workload Type" + }, + "properties": [ + { + "id": "custom.width", + "value": 150 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Running Pods" + }, + "properties": [ + { + "id": "custom.width", + "value": 120 + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 2, + "links": [], + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "CPU Requests %" + } + ] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "count(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", 
workload_type=\"$type\"}) by (workload, workload_type)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "merge", + "options": {} + }, + { + "id": "seriesToColumns", + "options": { + "byField": "workload" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, 
+ "workload": false + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "Running Pods", + "Value #B": "CPU Usage", + "Value #C": "CPU Requests", + "Value #D": "CPU Requests %", + "Value #E": "CPU Limits", + "Value #F": "CPU Limits %", + "workload": "Workload", + "workload_type": "Workload Type" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 16, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{workload}} - {{workload_type}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "quota - requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "quota - limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 
+ }, + "id": 17, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "align": "left", + "filterable": false + }, + "decimals": 0, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Workload" + }, + "properties": [ + { + "id": "custom.align", + "value": null + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to workload", + "url": "/d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=${__data.fields[workload]}&var-type=$type" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Usage" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Requests" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Requests %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Limits" + }, + "properties": [ + { + "id": "unit", + "value": "bytes" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Limits %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Workload Type" + }, + "properties": [ + { + "id": "custom.width", + "value": 150 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Running Pods" + }, + "properties": [ + { + "id": "custom.width", + "value": 120 + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 4, + "links": [], + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Workload" + } + ] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "count(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}) by (workload, workload_type)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + 
"legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "transformations": [ + { + "id": "merge", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Running Pods", + "Value #B": "Memory Usage", + "Value #C": "Memory Requests", + "Value #D": "Memory Requests %", + "Value #E": "Memory Limits", + "Value #F": "Memory Limits %", + "workload": "Workload", + "workload_type": "Workload Type" + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "description": null, + "error": null, + "hide": 0, + "includeAll": 
false, + "label": "Type", + "multi": false, + "name": "type", + "options": [], + "query": { + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes / Compute Resources / Namespace (Workloads)", + "uid": "a87fb0d919ec0ea5f6543124e16c42a5", + "version": 3 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-compute-resources-namespace-workloads + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml new file mode 100644 index 000000000..968b8fd1c --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml @@ -0,0 +1,1044 @@ +apiVersion: v1 +data: + k8s-compute-resources-node-pods.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12119, + "graphTooltip": 0, + "id": 7, + "iteration": 1619465861537, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 5, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] 
+ }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=\"$node\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 6, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Usage" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + 
"value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "pod" + }, + "properties": [ + { + "id": "displayName", + "value": "Pod" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 2, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=\"$node\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=\"$node\"}) by (pod) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=\"$node\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=\"$node\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=\"$node\"}) by (pod) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=\"$node\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "pod", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 7, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", 
node=\"$node\", container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 8, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (RSS)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Cache)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + 
"id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Swap)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "noValue", + "value": "-" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "pod" + }, + "properties": [ + { + "id": "displayName", + "value": "Pod" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 4, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=\"$node\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests{node=\"$node\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=\"$node\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits{node=\"$node\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + 
"options": { + "include": { + "names": [ + "pod", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E", + "Value #F", + "Value #G", + "Value #H" + ] + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": ["kubernetes-mixin"], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "node", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, node)", + "refId": "Observatorium-node-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["1m", "5m", "15m", "30m", "1h", "2h", "1d"], + "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + }, + "timezone": "browser", + "title": "Kubernetes / Compute Resources / Node (Pods)", + "uid": "200ac8fdbfbb74b39aff88118e4d1c2c", + "version": 3 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-compute-resources-node-pods + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml new file mode 100644 index 000000000..f1a21498f --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml @@ -0,0 +1,1298 @@ +apiVersion: v1 +data: + k8s-compute-resources-pod.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12120, + "graphTooltip": 0, + "id": 7, + "iteration": 1621618845678, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 12, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": 
{ + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "requests", + "color": "#F2495C", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + }, + { + "alias": "limits", + "color": "#FF9830", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", cluster=\"$cluster\"}) by (container)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{container}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"})\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"})\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 13, + "panels": [], + "repeat": null, + "title": "CPU Throttling", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 2, + "interval": "4m", + "legend": { + "avg": false, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ 
+ { + "expr": "sum(increase(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", cluster=\"$cluster\"}[5m])) by (container) /sum(increase(container_cpu_cfs_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", cluster=\"$cluster\"}[5m])) by (container)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{container}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.25, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Throttling", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 14, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Usage" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "container" + }, + "properties": [ + { + "id": "displayName", + "value": "Container" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": 
"custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 3, + "links": [], + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "container", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 15, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "limits", 
+ "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\"}) by (container)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{container}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"})\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"})\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 16, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": 
"byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (RSS)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Cache)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Swap)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "container" + }, + "properties": [ + { + "id": "displayName", + "value": "Container" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 5, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_requests{namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\", resource=\"memory\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container) / sum(kube_pod_container_resource_limits{namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", + "format": "table", + "instant": true, 
+ "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "container", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E", + "Value #F", + "Value #G", + "Value #H" + ] + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", 
+ "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Pod", + "uid": "6581e46e4e5c7ba40a07646395ef7b23", + "version": 2 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-compute-resources-pod + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml new file mode 100644 index 000000000..1562edbbd --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml @@ -0,0 +1,865 @@ +apiVersion: v1 +data: + k8s-compute-resources-workload.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12121, + "graphTooltip": 0, + "id": 8, + "iteration": 1621618974171, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$dashboard", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 14, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$dashboard", + "gridPos": { + "h": 1, + "w": 24, + 
"x": 0, + "y": 8 + }, + "id": 15, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "align": "left", + "filterable": false + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Pod" + }, + "properties": [ + { + "id": "custom.align", + "value": null + }, + { + "id": "links", + "value": [ + { + "targetBlank": true, + "title": "Drill down to pod", + "url": "/d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=${__data.fields[pod]}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "CPU Requests %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "CPU Limits %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 2, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", 
workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "pod" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Time": "", + "Value #A": "CPU Usage", + "Value #B": "CPU Requests", + "Value #C": "CPU Requests %", + "Value #D": "CPU Limits", + "Value #E": "CPU Limits %", + "pod": "Pod" + } + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Pod", + "CPU Usage", + "CPU Requests", + "CPU Requests %", + "CPU Limits", + "CPU Limits %" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$dashboard", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 16, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$dashboard", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 17, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "align": "left", + "filterable": false + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Pod" + }, + "properties": [ + { + "id": "custom.align", + "value": null + }, + { + "id": "links", + "value": [ + { + "targetBlank": true, + "title": "Drill down to pod", + "url": "/d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=${__data.fields[pod]}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Requests %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Memory Limits %" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 4, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + 
"refId": "C", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "pod" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Memory Usage", + "Value #B": "Memory Requests", + "Value #C": "Memory Requests %", + "Value #D": "Memory Limits", + "Value #E": "Memory Limits %", + "pod": "Pod" + } + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Pod", + "Memory Usage", + "Memory Requests", + "Memory Requests %", + "Memory Limits", + "Memory Limits %" + ] + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Prometheus-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refId": "Prometheus-namespace-Variable-Query" + }, + "refresh": 2, + "regex": "", 
+ "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\"}, workload)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Workload", + "multi": false, + "name": "workload", + "options": [], + "query": { + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\"}, workload)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\"}, workload_type)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Type", + "multi": false, + "name": "type", + "options": [], + "query": { + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\"}, workload_type)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes / Compute Resources / Workload", + "uid": "a164a7f0339f99e89cea5cb47e9be617", + "version": 2 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-compute-resources-workload + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-etcd.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-etcd.yaml new file mode 100644 index 000000000..a13cac0cc --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-etcd.yaml @@ -0,0 +1,1367 @@ +apiVersion: v1 +data: + k8s-etcd.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "etcd dashboard", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "iteration": 1620846460165, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + 
"w": 6, + "x": 0, + "y": 0 + }, + "id": 28, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": ["mean"], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(etcd_server_has_leader{cluster=\"$cluster\",job=\"etcd\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "metric": "etcd_server_has_leader", + "refId": "A", + "step": 20 + } + ], + "title": "Up", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 10, + "x": 6, + "y": 0 + }, + "hiddenSeries": false, + "id": 23, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{cluster=\"$cluster\",job=\"etcd\",grpc_type=\"unary\"}[$__rate_interval]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "RPC Rate", + "metric": "grpc_server_started_total", + "refId": "A", + "step": 2 + }, + { + "expr": "rpc_rate:grpc_server_handled_total:sum_rate{cluster=\"$cluster\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "RPC Failed Rate", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 41, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + 
"expr": "active_streams_watch:grpc_server_handled_total:sum{cluster=\"$cluster\"}", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Watch Streams", + "metric": "grpc_server_handled_total", + "refId": "A", + "step": 4 + }, + { + "expr": "active_streams_lease:grpc_server_handled_total:sum{cluster=\"$cluster\"}", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "Lease Streams", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Streams", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "etcd_debugging_mvcc_db_total_size_in_bytes{cluster=\"$cluster\",job=\"etcd\"}", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} DB Size", + "metric": "", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DB Size", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 1, + 
"points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{cluster=\"$cluster\",job=\"etcd\"}[$__rate_interval])) by (instance, le))", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} WAL fsync", + "metric": "etcd_disk_wal_fsync_duration_seconds_bucket", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{cluster=\"$cluster\",job=\"etcd\"}[$__rate_interval])) by (instance, le))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} DB fsync", + "metric": "etcd_disk_backend_commit_duration_seconds_bucket", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Sync Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 29, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{cluster=\"$cluster\"}", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Resident Memory", + "metric": "process_resident_memory_bytes", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 5, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 6, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 22, + "isNew": 
true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_received_bytes_total{cluster=\"$cluster\", job=\"etcd\"}[$__rate_interval])", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Client Traffic In", + "metric": "etcd_network_client_grpc_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Client Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 5, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 6, + "x": 6, + "y": 14 + }, + "hiddenSeries": false, + "id": 21, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_sent_bytes_total{cluster=\"$cluster\", job=\"etcd\"}[$__rate_interval])", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Client Traffic Out", + "metric": "etcd_network_client_grpc_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Client Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 6, + "x": 12, + "y": 14 + }, + 
"hiddenSeries": false, + "id": 20, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_received_bytes_total{cluster=\"$cluster\", job=\"etcd\"}[$__rate_interval])) by (instance)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Peer Traffic In", + "metric": "etcd_network_peer_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Peer Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 14 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_sent_bytes_total{cluster=\"$cluster\", job=\"etcd\"}[$__rate_interval])) by (instance)", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Peer Traffic Out", + "metric": "etcd_network_peer_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Peer Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + 
"gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 21 + }, + "hiddenSeries": false, + "id": 40, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_server_proposals_failed_total{cluster=\"$cluster\", job=\"etcd\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Proposal Failure Rate", + "metric": "etcd_server_proposals_failed_total", + "refId": "A", + "step": 2 + }, + { + "expr": "sum(etcd_server_proposals_pending{cluster=\"$cluster\", job=\"etcd\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Proposal Pending Total", + "metric": "etcd_server_proposals_pending", + "refId": "B", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_committed_total{job=\"$cluster\"}[$__rate_interval]))", + "intervalFactor": 2, + "legendFormat": "Proposal Commit Rate", + "metric": "etcd_server_proposals_committed_total", + "refId": "C", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_applied_total{job=\"$cluster\"}[$__rate_interval]))", + "intervalFactor": 2, + "legendFormat": "Proposal Apply Rate", + "refId": "D", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Raft Proposals", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": 0, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 21 + }, + "hiddenSeries": false, + "id": 19, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "changes(etcd_server_leader_changes_seen_total{cluster=\"$cluster\", job=\"etcd\"}[1d])", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}} Total Leader Elections Per Day", + "metric": "etcd_server_leader_changes_seen_total", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Leader Elections 
Per Day", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "1m", + "schemaVersion": 27, + "style": "dark", + "tags": ["OpenShift", "kubernetes-mixin"], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(etcd_server_has_leader,cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(etcd_server_has_leader,cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "now": true, + "refresh_intervals": ["1m", "5m", "15m", "30m", "1h", "2h", "1d"], + "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + }, + "timezone": "browser", + "title": "Kubernetes / etcd Cluster", + "uid": "N8BxQ2jMz", + "version": 4 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-etcd-cluster + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml new file mode 100644 index 000000000..95c10a945 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml @@ -0,0 +1,1177 @@ +apiVersion: v1 +data: + k8s-networking-cluster.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12124, + "graphTooltip": 0, + "id": 12, + "iteration": 1621460740486, + "links": [], + "panels": [ + { + "collapse": false, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "Bps" + }, + 
"overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 3, + "interval": "1m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:58", + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:59", + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "hiddenSeries": false, + "id": 4, + "interval": "1m", + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "minSpan": 24, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:383", + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:384", + "format": "Bps", + "label": null, + 
"logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Received" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Current Bandwidth Transmitted" + }, + { + "id": "unit", + "value": "Bps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Received Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Rate of Transmitted Packets Dropped" + }, + { + "id": "unit", + "value": "pps" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "namespace" + }, + "properties": [ + { + "id": "displayName", + "value": "Namespace" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "links", + "value": [ + { + "targetBlank": false, + "title": "Drill down to pods", + "url": "d/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&refresh=1m&var-namespace=${__data.fields.namespace}" + } + ] + }, + { + "id": "custom.align", + "value": null + }, + { + "id": "custom.width", + "value": 300 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "minSpan": 24, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(instance:node_network_receive_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(instance:node_network_transmit_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", 
+ "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #G", + "Value #H", + "instance" + ] + } + } + } + ], + "type": "table" + }, + { + "collapse": true, + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 15, + "panels": [], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 16, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(instance:node_network_receive_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 4, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1132", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "$$hashKey": "object:1133", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 29 + }, + "hiddenSeries": false, + "id": 17, + "interval": "2m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "minSpan": 24, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(instance:node_network_transmit_drop_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1290", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "$$hashKey": "object:1291", + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "targetBlank": true, + "title": "What is TCP Retransmit?", + "url": "https://accedian.com/enterprises/blog/network-packet-loss-retransmissions-and-duplicate-acknowledgements/" + } + ], + "minSpan": 24, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(rate(node_netstat_Tcp_RetransSegs{cluster=\"$cluster\"}[$interval:$resolution]) / rate(node_netstat_Tcp_OutSegs{cluster=\"$cluster\"}[$interval:$resolution])) by (instance))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of TCP Retransmits out of all sent segments", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1368", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "$$hashKey": "object:1369", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 47 + }, + "hiddenSeries": false, + "id": 19, + "interval": "2m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, 
+ "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "targetBlank": true, + "title": "Why monitor SYN retransmits?", + "url": "https://github.com/prometheus/node_exporter/issues/1023#issuecomment-408128365" + } + ], + "minSpan": 24, + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(rate(node_netstat_TcpExt_TCPSynRetrans{cluster=\"$cluster\"}[$interval:$resolution]) / rate(node_netstat_Tcp_RetransSegs{cluster=\"$cluster\"}[$interval:$resolution])) by (instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of TCP SYN Retransmits out of all retransmits", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1668", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "$$hashKey": "object:1669", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "4h", + "value": "4h" + }, + "datasource": "$datasource", + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": 
false, + "text": "1h", + "value": "1h" + } + ], + "query": "1m,5m,1h", + "queryValue": "", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes / Networking / Cluster", + "uid": "ff635a025bcfea7bc3dd4f508990a3e9", + "version": 19 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-networking-cluster + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server-cluster.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server-cluster.yaml new file mode 100644 index 000000000..76b800288 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server-cluster.yaml @@ -0,0 +1,1064 @@ +apiVersion: v1 +data: + k8s-service-level-overview-api-server-cluster.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Service-level overview for the Kubernetes API server for an individual cluster within the fleet.", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 13, + "iteration": 1633958242996, + "links": [ + { + "icon": "dashboard", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Kubernetes / Service-Level Overview / API Server", + "tooltip": "", + "type": "link", + "url": "./d/L8KwmaR7nz/kubernetes-service-level-overview-api-server?orgId=1&refresh=5m" + } + ], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 12, + "panels": [], + "title": "Service-Level Overview - Kubernetes API Server Request Duration", + "type": "row" + }, + { + "datasource": null, + "description": "The service-level target for the API server request duration service-level objective (SLO).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "rgb(255, 255, 255)", + "value": 0 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 51, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/^target$/", + "values": false + }, + 
"text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}", + "format": "table", + "instant": true, + "interval": "5m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Target", + "transformations": [], + "type": "stat" + }, + { + "datasource": null, + "description": "Service-level objective (SLO) status from over a 7 days period. (The SLO is calculated from # of request duration >= target / total count of request durations)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "", + "type": 1, + "value": "null" + } + ], + "noValue": "-", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "#EAB839", + "value": 95 + }, + { + "color": "green", + "value": 99 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 8, + "y": 1 + }, + "hideTimeOverride": true, + "id": 35, + "interval": "5m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])", + "format": "table", + "instant": true, + "interval": "5m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "7d", + "timeShift": null, + "title": "Past 7 Days", + "transformations": [], + "type": "stat" + }, + { + "datasource": null, + "description": "Service-level objective (SLO) status from over a 30 days period. 
(The SLO is calculated from # of request duration >= target / total count of request durations)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "#EAB839", + "value": 95 + }, + { + "color": "green", + "value": 99 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 16, + "y": 1 + }, + "hideTimeOverride": true, + "id": 32, + "interval": "5m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])", + "format": "table", + "instant": true, + "interval": "5m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "30d", + "timeShift": null, + "title": "Past 30 Days", + "transformations": [], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 73, + "panels": [], + "title": "Error Budget for 7 Days", + "type": "row" + }, + { + "datasource": null, + "description": "The current day within the week period.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "", + "to": "", + "type": 1, + "value": "" + } + ], + "max": 7, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "orange", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 6 + }, + "id": 87, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "day_of_week()", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Day of the week", + "type": "gauge" + }, + { + "datasource": null, + "description": "The amount of error budget that has been consumed for the API server request duration service-level objective (SLO).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 6 + }, + "hideTimeOverride": true, + "id": 89, + "interval": "1m", + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "((0.9900 - 
floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / 1)", + "format": "table", + "instant": true, + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "7d", + "timeShift": "0M/M", + "title": "Error Budget (Past 7 Days)", + "transformations": [], + "type": "gauge" + }, + { + "datasource": null, + "description": "The time remaining within the 7d period in which the cluster can afford downtime.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "0", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "rgb(255, 255, 255)", + "value": 0 + } + ] + }, + "unit": "m" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 6 + }, + "hideTimeOverride": true, + "id": 84, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "(0.9900 - floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) * (7 * 24 * 60) * -1", + "format": "table", + "instant": true, + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "7d", + "timeShift": null, + "title": "Downtime Remaining (Past 7-days)", + "transformations": [], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 92, + "panels": [], + "title": "Error Budget for 30 Days", + "type": "row" + }, + { + "datasource": null, + "description": "The current day within the month period.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [], + "max": 31, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "orange", + "value": null + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 12 + }, + "id": 88, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "day_of_month()", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Day of the month", + "type": "gauge" + }, + { + "datasource": null, + "description": "The amount of error budget that has been consumed for the API server request duration service-level objective (SLO).", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 50 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 12 + }, + "hideTimeOverride": 
true, + "id": 90, + "interval": "1m", + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "((0.9900 - floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / 1)", + "format": "table", + "instant": true, + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "30d", + "timeShift": "0M/M", + "title": "Error Budget (Past 30 Days)", + "transformations": [], + "type": "gauge" + }, + { + "datasource": null, + "description": "The time remaining within the 30d period in which the cluster can afford downtime.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "rgb(255, 255, 255)", + "value": 0 + } + ] + }, + "unit": "m" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 12 + }, + "hideTimeOverride": true, + "id": 85, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "(0.9900 - floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=\"$cluster\"}[$__range])) * (30 * 24 * 60) * -1", + "format": "table", + "hide": false, + "instant": true, + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "30d", + "timeShift": null, + "title": "Downtime Remaining (Past 30-days)", + "transformations": [], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 21, + "panels": [], + "title": "Trend", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "description": "Trending graph of the service-level indicators (SLI) over relative time period used to compute the service-level objective (SLO).", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "0.9900" + }, + "properties": [ + { + "id": "displayName", + "value": "Target Threshold" + } + ] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 18 + }, + "hiddenSeries": false, + "hideTimeOverride": false, + "id": 8, + "interval": "1m", + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 1, + 
"points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sli:apiserver_request_duration_seconds:trend:1m{cluster=\"$cluster\"}", + "hide": false, + "interval": "1m", + "legendFormat": "", + "refId": "A" + }, + { + "expr": "0.9900", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "API Server Request Duration - SLI", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:2055", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0.8", + "show": true + }, + { + "$$hashKey": "object:2056", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": null, + "description": "The collected service-level indicator (SLI) values for the API server request duration service-level objective (SLO), over the relative time range. (# of data points are subjected to change based upon the interval set by Grafana)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "filterable": false + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "#EAB839", + "value": 99 + }, + { + "color": "green", + "value": 99.5 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "SLI" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "color-background" + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 62, + "interval": "1m", + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sli:apiserver_request_duration_seconds:trend:1m{cluster=\"$cluster\"}", + "format": "table", + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeShift": null, + "title": "API Server Request Duration - SLI", + "transformations": [ + { + "id": "groupBy", + "options": { + "fields": { + "Time": { + "aggregations": [], + "operation": "groupby" + }, + "Value": { + "aggregations": [], + "operation": "groupby" + }, + "cluster": { + "aggregations": [], + "operation": "groupby" + }, + "clusterID": { + "aggregations": [], + "operation": "groupby" + } + } + } + }, + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "Value": "SLI", + "cluster": "Cluster", + "clusterID": "ClusterID" + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 27, + "style": "dark", + "tags": [ + "SLI", + "SLO" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": null, + "definition": "label_values(sli:apiserver_request_duration_seconds:trend:1m, cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + 
"query": { + "query": "label_values(sli:apiserver_request_duration_seconds:trend:1m, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Kubernetes / Service-Level Overview / API Server / Cluster", + "uid": "3TwB9bRnnz", + "version": 4 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-service-level-overview-api-server-cluster + namespace: open-cluster-management-observability + labels: + general-folder: 'true' \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server.yaml new file mode 100644 index 000000000..70b613137 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-service-level-overview-api-server.yaml @@ -0,0 +1,670 @@ +apiVersion: v1 +data: + k8s-service-level-overview.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Service-level overview for the Kubernetes API server at the fleet level.", + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 15, + "iteration": 1632264737387, + "links": [ + { + "icon": "dashboard", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Kubernetes / Service-Level Overview / API Server / Cluster", + "type": "link", + "url": "./d/3TwB9bRnnz/kubernetes-service-level-overview-api-server-cluster?orgId=1&refresh=5m" + } + ], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 35, + "panels": [], + "title": "Fleet Overview ($cluster) - $window", + "type": "row" + }, + { + "datasource": null, + "description": "A total number of the clusters that have exceeded their service-level objective (SLO) target.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgb(255, 255, 255)", + "value": null + }, + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 12, + "x": 0, + "y": 1 + }, + "hideTimeOverride": true, + "id": 39, + "interval": null, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "sum(floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range]) < bool 0.99)", + "format": "table", + "instant": true, + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": 
"$window", + "timeShift": null, + "title": "Number of cluster that has exceeded the SLO", + "transformations": [], + "type": "stat" + }, + { + "datasource": null, + "description": "A total number of clusters that haven't breached the service-level objective (SLO) target.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": {}, + "mappings": [ + { + "from": "", + "id": 1, + "text": "N/A", + "to": "", + "type": 1, + "value": "null" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgb(255, 255, 255)", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 1 + }, + "hideTimeOverride": true, + "id": 42, + "interval": null, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "expr": "(sum(floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range]) >= bool 0.99))", + "format": "table", + "instant": true, + "interval": "1m", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": "$window", + "timeShift": null, + "title": "Number of clusters that are meeting the SLO", + "transformations": [], + "type": "stat" + }, + { + "datasource": null, + "description": "List of the topk cluster over a $window period. The results are sorted from worst offending clusters to passing clusters.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "left", + "displayMode": "auto", + "filterable": false + }, + "decimals": 2, + "mappings": [ + { + "from": "", + "id": 1, + "text": "-", + "to": "", + "type": 1, + "value": "null" + } + ], + "noValue": "-", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "yellow", + "value": 95 + }, + { + "color": "green", + "value": 99.5 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Cluster" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "targetBlank": true, + "title": "Kubernetes / Service-Level Overview / API Server / Cluster", + "url": "./d/3TwB9bRnnz/kubernetes-service-level-overview-api-server-cluster?orgId=1&var-cluster=${__data.fields.cluster}&${__url_time_range}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "SLO" + }, + "properties": [ + { + "id": "custom.displayMode", + "value": "color-background" + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 6 + }, + "id": 44, + "interval": "1m", + "options": { + "frameIndex": 0, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "7.4.2", + "targets": [ + { + "exemplar": false, + "expr": "sort_desc(bottomk($top, floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range])))", + "format": "table", + "hide": false, + "instant": true, + "interval": "1m", + 
"legendFormat": "", + "refId": "A" + }, + { + "expr": "(0.99 - sort_desc(bottomk($top, floor(sum_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range])) / count_over_time(sli:apiserver_request_duration_seconds:bin:trend:1m{cluster=~\"$cluster\"}[$__range]))))", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": "$window", + "timeShift": null, + "title": "Top Clusters ($top)", + "transformations": [ + { + "id": "merge", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "clusterID": true, + "receive": true, + "tenant_id": true + }, + "indexByName": { + "Time": 0, + "Value #A": 7, + "Value #B": 6, + "cluster": 1, + "clusterID": 2, + "receive": 3, + "target": 4, + "tenant_id": 5 + }, + "renameByName": { + "Value #A": "SLO", + "Value #B": "Error Budget", + "cluster": "Cluster", + "target": "Target" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 41, + "panels": [], + "title": "API Server Request Duration - Status ($cluster)", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "description": "The service-level indicator (SLI) trend of the topk clusters over a relative time. The results are sorted from worst offending clusters to passing clusters.", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 24, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.2", + "pointradius": 1, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "bottomk($top, sli:apiserver_request_duration_seconds:trend:1m{cluster=~\"$cluster\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "1m", + "legendFormat": "", + "refId": "A" + }, + { + "expr": "0.9900", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Top Cluster's SLI Trend ($top)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Target": true + }, + "indexByName": {}, + "renameByName": { + "0.9900": "Target Threshold", + "Target": "" + } + } + } + ], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:942", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0.8", + "show": true + }, + { + "$$hashKey": "object:943", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + 
"schemaVersion": 27, + "style": "dark", + "tags": [ + "SLI", + "SLO" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": null, + "definition": "label_values(sli:apiserver_request_duration_seconds:trend:1m, cluster)", + "description": "Clusters within the fleet.", + "error": null, + "hide": 0, + "includeAll": true, + "label": "Cluster", + "multi": true, + "name": "cluster", + "options": [ + { + "selected": false, + "text": "All", + "value": "$__all" + }, + { + "selected": true, + "text": "local-cluster", + "value": "local-cluster" + } + ], + "query": { + "query": "label_values(sli:apiserver_request_duration_seconds:trend:1m, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 0, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "7d", + "value": "7d" + }, + "description": "Time window for which the service level objective (SLO) is evaluated over.", + "error": null, + "hide": 0, + "label": "Window", + "name": "window", + "options": [ + { + "selected": true, + "text": "7d", + "value": "7d" + }, + { + "selected": false, + "text": "30d", + "value": "30d" + } + ], + "query": "7d,30d", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + }, + { + "allValue": "100000", + "current": { + "selected": true, + "text": "20", + "value": "20" + }, + "description": "Top (n) number of clusters that have exceeded the SLO.", + "error": null, + "hide": 0, + "includeAll": true, + "label": "Top", + "multi": false, + "name": "top", + "options": [ + { + "selected": false, + "text": "All", + "value": "$__all" + }, + { + "selected": false, + "text": "5", + "value": "5" + }, + { + "selected": false, + "text": "10", + "value": "10" + }, + { + "selected": true, + "text": "20", + "value": "20" + }, + { + "selected": false, + "text": "50", + "value": "50" + }, + { + "selected": false, + "text": "100", + "value": "100" + } + ], + "query": "5, 10, 20, 50, 100", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Kubernetes / Service-Level Overview / API Server", + "uid": "L8KwmaR7nz", + "version": 7 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-k8s-service-level-overview + namespace: open-cluster-management-observability + labels: + general-folder: 'true' \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml new file mode 100644 index 000000000..68909ae10 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml @@ -0,0 +1,1141 @@ +apiVersion: v1 +data: + node-rsrc-use.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "http://www.brendangregg.com/USEmethod/use-linux.html", + "editable": true, + "gnetId": 12136, + "graphTooltip": 0, + "id": 13, + "iteration": 1601604476811, + "links": [], + "panels": [ + { + "collapsed": 
false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "repeat": null, + "title": "CPU", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_cpu_utilisation:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Utilisation", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "hiddenSeries": false, + "id": 2, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_load1_per_cpu:ratio{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Saturation", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Saturation (Load1 per CPU)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, 
+ { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 11, + "panels": [], + "repeat": null, + "title": "Memory", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 3, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_memory_utilisation:ratio{cluster=\"$cluster\", job=\"node-exporter\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Memory", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_vmstat_pgmajfault:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Major page faults", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Saturation (Major Page Faults)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + 
"yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 12, + "panels": [], + "repeat": null, + "title": "Net", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 17 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/Receive/", + "stack": "A" + }, + { + "alias": "/Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Receive", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "instance:node_network_transmit_bytes_excluding_lo:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Transmit", + "legendLink": null, + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Net Utilisation (Bytes Receive/Transmit)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 17 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/Receive/", + "stack": "A" + }, + { + "alias": "/Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_drop_excluding_lo:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Receive drops", + 
"legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "instance:node_network_transmit_drop_excluding_lo:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Transmit drops", + "legendLink": null, + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Net Saturation (Drops Receive/Transmit)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "rps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 13, + "panels": [], + "repeat": null, + "title": "Disk IO", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 25 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_seconds:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{device}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 25 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": 
"flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_weighted_seconds:rate1m{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{device}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk IO Saturation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 14, + "panels": [], + "repeat": null, + "title": "Disk Space", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 33 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 -\n(\n max without (mountpoint, fstype) (node_filesystem_avail_bytes{cluster=\"$cluster\", job=\"node-exporter\", fstype!=\"\", instance=\"$instance\"})\n/\n max without (mountpoint, fstype) (node_filesystem_size_bytes{cluster=\"$cluster\", job=\"node-exporter\", fstype!=\"\", instance=\"$instance\"})\n)\n", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{device}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Space Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + 
"allValue": null, + "current": { + "selected": true, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(cluster)", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(cluster)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(up{cluster=\"$cluster\", job=\"node-exporter\"}, instance)", + "hide": 0, + "includeAll": false, + "label": "Instance", + "multi": false, + "name": "instance", + "options": [], + "query": "label_values(up{cluster=\"$cluster\", job=\"node-exporter\"}, instance)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "USE Method / Node", + "uid": "fac67cfbe174d3ef53eb473d73d9212f", + "version": 3 + } +kind: ConfigMap +metadata: + name: grafana-dashboard-node-rsrc-use + namespace: open-cluster-management-observability + labels: + general-folder: 'true' diff --git a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml new file mode 100644 index 000000000..4459eea38 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml @@ -0,0 +1,92 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: multicluster-observability-grafana + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + name: grafana + namespace: open-cluster-management-observability +spec: + replicas: 2 + selector: + matchLabels: + app: multicluster-observability-grafana + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + template: + metadata: + labels: + app: multicluster-observability-grafana + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 70 + podAffinityTerm: + topologyKey: topology.kubernetes.io/zone + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - multicluster-observability-grafana + - weight: 30 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - multicluster-observability-grafana + containers: + - args: + - -config=/etc/grafana/grafana.ini + image: quay.io/stolostron/grafana:2.4.0-SNAPSHOT-2021-09-23-07-02-14 + imagePullPolicy: Always + name: grafana + ports: + - containerPort: 3001 + name: http + protocol: TCP + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 4m + memory: 100Mi + volumeMounts: + - mountPath: /var/lib/grafana + name: grafana-storage + - mountPath: /etc/grafana/provisioning/datasources + name: grafana-datasources + - mountPath: /etc/grafana + name: grafana-config + - name: 
grafana-dashboard-loader + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: quay.io/stolostron/grafana-dashboard-loader:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + imagePullPolicy: Always + resources: + requests: + cpu: 4m + memory: 50Mi + serviceAccount: grafana + imagePullSecrets: + - name: multiclusterhub-operator-pull-secret + serviceAccountName: grafana + volumes: + - emptyDir: {} + name: grafana-storage + - name: grafana-datasources + secret: + defaultMode: 420 + secretName: grafana-datasources + - name: grafana-config + secret: + defaultMode: 420 + secretName: grafana-config diff --git a/operators/multiclusterobservability/manifests/base/grafana/ingress.yaml b/operators/multiclusterobservability/manifests/base/grafana/ingress.yaml new file mode 100644 index 000000000..532b4a108 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/ingress.yaml @@ -0,0 +1,18 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + ingress.open-cluster-management.io/rewrite-target: / + kubernetes.io/ingress.class: ingress-open-cluster-management + name: grafana +spec: + rules: + - http: + paths: + - backend: + service: + name: grafana + port: + number: 3001 + path: /grafana + pathType: ImplementationSpecific diff --git a/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml b/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml new file mode 100644 index 000000000..247ea1827 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml @@ -0,0 +1,24 @@ +resources: +- cluster-role.yaml +- cluster-role-binding.yaml +- service-account.yaml +- config.yaml +- deployment.yaml +- ingress.yaml +- service.yaml +- dash-acm-optimization-overview.yaml +- dash-acm-clusters-overview.yaml +- dash-acm-clusters-overview-ocp311.yaml +- dash-k8s-etcd.yaml +- dash-k8s-apiserver.yaml +- dash-k8s-networking-cluster.yaml +- dash-k8s-compute-resources-cluster.yaml +- dash-k8s-compute-resources-namespace-pods.yaml +- dash-k8s-compute-resources-namespace-workloads.yaml +- dash-k8s-compute-resources-workload.yaml +- dash-k8s-compute-resources-node-pods.yaml +- dash-k8s-compute-resources-pod.yaml +- dash-k8s-service-level-overview-api-server.yaml +- dash-k8s-service-level-overview-api-server-cluster.yaml +- dash-cluster-rsrc-use.yaml +- dash-node-rsrc-use.yaml diff --git a/operators/multiclusterobservability/manifests/base/grafana/service-account.yaml b/operators/multiclusterobservability/manifests/base/grafana/service-account.yaml new file mode 100644 index 000000000..779723e6c --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/service-account.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + serviceaccounts.openshift.io/oauth-redirectreference.grafana: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana"}}' + name: grafana + namespace: open-cluster-management-observability diff --git a/operators/multiclusterobservability/manifests/base/grafana/service-monitor.yaml b/operators/multiclusterobservability/manifests/base/grafana/service-monitor.yaml new file mode 100644 index 000000000..0b9a7c3e4 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/service-monitor.yaml @@ -0,0 +1,17 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: grafana + namespace: open-cluster-management-observability +spec: + 
endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + port: https + scheme: https + tlsConfig: + caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt + serverName: server-name-replaced-at-runtime + selector: + matchLabels: + app: multicluster-observability-grafana diff --git a/operators/multiclusterobservability/manifests/base/grafana/service.yaml b/operators/multiclusterobservability/manifests/base/grafana/service.yaml new file mode 100644 index 000000000..cf9ed2d3f --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: multicluster-observability-grafana + name: grafana + namespace: open-cluster-management-observability +spec: + ports: + - name: grafana-http + port: 3001 + protocol: TCP + targetPort: 3001 + selector: + app: multicluster-observability-grafana + type: ClusterIP diff --git a/operators/multiclusterobservability/manifests/base/observatorium/cluster_role.yaml b/operators/multiclusterobservability/manifests/base/observatorium/cluster_role.yaml new file mode 100644 index 000000000..f033585f3 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/observatorium/cluster_role.yaml @@ -0,0 +1,109 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: observatorium-operator + app.kubernetes.io/version: v0.1 + name: open-cluster-management:observatorium-operator +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - core.observatorium.io + resources: + - observatoria + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - core.observatorium.io + resources: + - observatoria/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - configmaps + - secrets + - serviceaccounts + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + - roles + verbs: + - create + - get + - patch + - update + - list + - watch + - delete +- apiGroups: + - apps + resources: + - statefulsets + - deployments + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "" + resources: + - services + - services/finalizers + - endpoints + verbs: + - get + - create + - update + - delete +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - '*' +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - create + - update + - delete diff --git a/operators/multiclusterobservability/manifests/base/observatorium/cluster_role_binding.yaml b/operators/multiclusterobservability/manifests/base/observatorium/cluster_role_binding.yaml new file mode 100644 index 000000000..544b28b5a --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/observatorium/cluster_role_binding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: observatorium-operator + app.kubernetes.io/version: v0.1 + name: open-cluster-management:observatorium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + 
kind: ClusterRole + name: open-cluster-management:observatorium-operator +subjects: +- kind: ServiceAccount + name: observatorium + namespace: open-cluster-management-observability + diff --git a/operators/multiclusterobservability/manifests/base/observatorium/kustomization.yaml b/operators/multiclusterobservability/manifests/base/observatorium/kustomization.yaml new file mode 100644 index 000000000..7b3a243e8 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/observatorium/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- cluster_role.yaml +- cluster_role_binding.yaml +- prometheus_role.yaml +- prometheus_role_binding.yaml +- service-account.yaml +- operator.yaml diff --git a/operators/multiclusterobservability/manifests/base/observatorium/operator.yaml b/operators/multiclusterobservability/manifests/base/observatorium/operator.yaml new file mode 100644 index 000000000..a724d34d8 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/observatorium/operator.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: observatorium-operator + labels: + app.kubernetes.io/component: observatorium-operator + app.kubernetes.io/name: observatorium-operator + app.kubernetes.io/part-of: observatorium + control-plane: observatorium-operator + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" +spec: + replicas: 1 + selector: + matchLabels: + control-plane: observatorium-operator + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + template: + metadata: + labels: + control-plane: observatorium-operator + observability.open-cluster-management.io/name: "{{MULTICLUSTEROBSERVABILITY_CR_NAME}}" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - observatorium-operator + topologyKey: kubernetes.io/hostname + weight: 30 + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - observatorium-operator + topologyKey: topology.kubernetes.io/zone + weight: 70 + containers: + - args: + - --log-level=info + image: quay.io/stolostron/observatorium-operator:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + imagePullPolicy: "{{MULTICLUSTEROBSERVABILITY_IMAGE_PULL_POLICY}}" + name: observatorium-operator + resources: + limits: + cpu: 100m + memory: 600Mi + requests: + cpu: 10m + memory: 50Mi + imagePullSecrets: + - name: "{{MULTICLUSTEROBSERVABILITY_IMAGE_PULL_SECRET}}" + serviceAccountName: observatorium diff --git a/operators/multiclusterobservability/manifests/base/observatorium/prometheus_role.yaml b/operators/multiclusterobservability/manifests/base/observatorium/prometheus_role.yaml new file mode 100644 index 000000000..47bb6b7fd --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/observatorium/prometheus_role.yaml @@ -0,0 +1,15 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: open-cluster-management-observability:observatorium-monitoring +rules: + - verbs: + - get + - list + - watch + apiGroups: + - '' + resources: + - pods + - services + - endpoints diff --git a/operators/multiclusterobservability/manifests/base/observatorium/prometheus_role_binding.yaml b/operators/multiclusterobservability/manifests/base/observatorium/prometheus_role_binding.yaml new file mode 100644 index 000000000..2377cbda7 --- /dev/null +++ 
b/operators/multiclusterobservability/manifests/base/observatorium/prometheus_role_binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: open-cluster-management-observability:observatorium-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: open-cluster-management-observability:observatorium-monitoring +subjects: +- kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring + diff --git a/operators/multiclusterobservability/manifests/base/observatorium/service-account.yaml b/operators/multiclusterobservability/manifests/base/observatorium/service-account.yaml new file mode 100644 index 000000000..24cb6ea1e --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/observatorium/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: observatorium + namespace: open-cluster-management-observability diff --git a/operators/multiclusterobservability/manifests/base/proxy/cluster-role-binding.yaml b/operators/multiclusterobservability/manifests/base/proxy/cluster-role-binding.yaml new file mode 100644 index 000000000..8aaa79012 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:rbac-query-proxy +subjects: +- kind: ServiceAccount + name: rbac-query-proxy + namespace: open-cluster-management-observability +roleRef: + kind: ClusterRole + name: open-cluster-management:rbac-query-proxy + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/proxy/cluster-role.yaml b/operators/multiclusterobservability/manifests/base/proxy/cluster-role.yaml new file mode 100644 index 000000000..06fc7d8dc --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/cluster-role.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:rbac-query-proxy +rules: +- apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + verbs: + - list + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/proxy/cookie-secret.yaml b/operators/multiclusterobservability/manifests/base/proxy/cookie-secret.yaml new file mode 100644 index 000000000..96f32646e --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/cookie-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +data: {} +kind: Secret +metadata: + name: rbac-proxy-cookie-secret + namespace: open-cluster-management-observability + annotations: + skip-creation-if-exist: "true" +type: Opaque diff --git a/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml b/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml new file mode 100644 index 000000000..09b88f218 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml @@ -0,0 +1,136 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: rbac-query-proxy + observability.open-cluster-management.io/name: "{{MCO_CR_NAME}}" + name: rbac-query-proxy + namespace: open-cluster-management-observability +spec: 
+ replicas: 2 + selector: + matchLabels: + app: rbac-query-proxy + observability.open-cluster-management.io/name: "{{MCO_CR_NAME}}" + template: + metadata: + labels: + app: rbac-query-proxy + observability.open-cluster-management.io/name: "{{MCO_CR_NAME}}" + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 70 + podAffinityTerm: + topologyKey: topology.kubernetes.io/zone + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rbac-query-proxy + - weight: 30 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - rbac-query-proxy + containers: + - name: rbac-query-proxy + image: quay.io/stolostron/rbac-query-proxy:2.3.0-SNAPSHOT-2021-07-26-18-43-26 + imagePullPolicy: Always + args: + - "--listen-address=0.0.0.0:8080" + - "--metrics-server=https://{{OBSERVATORIUM_NAME}}-observatorium-api.{{MCO_NAMESPACE}}.svc.cluster.local:8080/api/metrics/v1/default" + ports: + - containerPort: 8080 + name: http + volumeMounts: + - name: ca-certs + mountPath: /var/rbac_proxy/ca + - name: client-certs + mountPath: /var/rbac_proxy/certs + - name: probe-command + mountPath: /etc/probe + livenessProbe: + exec: + command: + - /bin/sh + - -c + - /etc/probe/check.sh + periodSeconds: 60 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + resources: + requests: + cpu: 20m + memory: 100Mi + - args: + - --provider=openshift + - --https-address=:8443 + - --http-address= + - --upstream=http://localhost:8080 + - '--client-id={{MCO_NAMESPACE}}' + - '--client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token' + - '--pass-user-bearer-token=true' + - '--pass-access-token=true' + - --openshift-sar={"resource":"namespaces","verb":"get"} + - --openshift-delegate-urls={"/":{"resource":"namespaces","verb":"get"}} + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --openshift-service-account=rbac-query-proxy + - --cookie-secret-file=/etc/proxy/secrets/session_secret + - --skip-provider-button=true + - --openshift-ca=/etc/pki/tls/cert.pem + - --openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + image: quay.io/stolostron/origin-oauth-proxy:2.0.11-SNAPSHOT-2021-04-29-18-29-17 + imagePullPolicy: Always + name: oauth-proxy + ports: + - containerPort: 8443 + name: oauth-proxy + protocol: TCP + resources: + requests: + cpu: 1m + memory: 20Mi + readinessProbe: + failureThreshold: 3 + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/tls/private + name: certs + readOnly: true + - mountPath: /etc/proxy/secrets + name: cookie-secret + serviceAccountName: rbac-query-proxy + imagePullSecrets: + - name: multiclusterhub-operator-pull-secret + volumes: + - name: certs + secret: + secretName: rbac-proxy-certs + - name: cookie-secret + secret: + secretName: rbac-proxy-cookie-secret + - name: ca-certs + secret: + secretName: observability-server-certs + - name: client-certs + secret: + secretName: observability-grafana-certs + - name: probe-command + configMap: + name: rbac-query-proxy-probe + defaultMode: 0777 \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/proxy/ingress.yaml b/operators/multiclusterobservability/manifests/base/proxy/ingress.yaml new file mode 100644 index 000000000..85e400f1a --- /dev/null +++ 
b/operators/multiclusterobservability/manifests/base/proxy/ingress.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: rbac-query-proxy-ingress + annotations: + ingress.open-cluster-management.io/rewrite-target: / + kubernetes.io/ingress.class: "ingress-open-cluster-management" + ingress.open-cluster-management.io/auth-type: "access-token" + ingress.open-cluster-management.io/secure-backends: "true" +spec: + rules: + - http: + paths: + - path: "/observability-query" + backend: + service: + name: rbac-query-proxy + port: + number: 8443 + pathType: ImplementationSpecific diff --git a/operators/multiclusterobservability/manifests/base/proxy/kustomization.yaml b/operators/multiclusterobservability/manifests/base/proxy/kustomization.yaml new file mode 100644 index 000000000..bc7029ead --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/kustomization.yaml @@ -0,0 +1,9 @@ +resources: +- cluster-role-binding.yaml +- cluster-role.yaml +- cookie-secret.yaml +- deployment.yaml +- ingress.yaml +- prob-cmd-configmap.yaml +- service-account.yaml +- service.yaml diff --git a/operators/multiclusterobservability/manifests/base/proxy/prob-cmd-configmap.yaml b/operators/multiclusterobservability/manifests/base/proxy/prob-cmd-configmap.yaml new file mode 100644 index 000000000..b41e7a183 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/prob-cmd-configmap.yaml @@ -0,0 +1,14 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: rbac-query-proxy-probe +data: + check.sh: | + #!/bin/bash + if [ -e /tmp/health ] + then + cat /tmp/health + exit 1 + else + echo 0 + fi \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/proxy/service-account.yaml b/operators/multiclusterobservability/manifests/base/proxy/service-account.yaml new file mode 100644 index 000000000..93659c497 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rbac-query-proxy + namespace: open-cluster-management-observability \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/base/proxy/service.yaml b/operators/multiclusterobservability/manifests/base/proxy/service.yaml new file mode 100644 index 000000000..9173af640 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/proxy/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: rbac-query-proxy + name: rbac-query-proxy + namespace: open-cluster-management-observability + annotations: + service.beta.openshift.io/serving-cert-secret-name: rbac-proxy-certs +spec: + ports: + - name: https + port: 8443 + targetPort: oauth-proxy + - name: http + port: 8080 + targetPort: http + selector: + app: rbac-query-proxy diff --git a/operators/multiclusterobservability/manifests/base/thanos/kustomization.yaml b/operators/multiclusterobservability/manifests/base/thanos/kustomization.yaml new file mode 100644 index 000000000..0afa6a9f2 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/thanos/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- thanos-ruler-config.yaml +- thanos-ruler-clusterrole.yaml +- thanos-ruler-clusterrolebinding.yaml diff --git a/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrole.yaml b/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrole.yaml new file mode 100644 index 
000000000..584cbc603 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrole.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:thanos-rule +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get diff --git a/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrolebinding.yaml b/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrolebinding.yaml new file mode 100644 index 000000000..b5b8ac176 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:thanos-rule +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: open-cluster-management:thanos-rule +subjects: +- kind: ServiceAccount + name: observability-thanos-rule + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-config.yaml b/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-config.yaml new file mode 100644 index 000000000..5ace7185e --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/thanos/thanos-ruler-config.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +data: + config.yaml: | + alertmanagers: + - http_config: + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + tls_config: + ca_file: /etc/thanos/configmaps/alertmanager-ca-bundle/service-ca.crt + server_name: alertmanager.open-cluster-management-observability.svc + insecure_skip_verify: false + scheme: https + static_configs: + - alertmanager.open-cluster-management-observability.svc:9095 + api_version: v2 +kind: ConfigMap +metadata: + name: thanos-ruler-config + namespace: open-cluster-management diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/aggregate_role.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/aggregate_role.yaml new file mode 100644 index 000000000..9baa9b460 --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/aggregate_role.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: aggregate-observabilityaddons-edit + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - verbs: + - get + - list + - watch + - create + - delete + - update + apiGroups: + - observability.open-cluster-management.io + resources: + - observabilityaddons \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml new file mode 100644 index 000000000..bfbad4dea --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml @@ -0,0 +1,11 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: images-list +data: + prometheus: "quay.io/stolostron/prometheus:2.4.0-SNAPSHOT-2021-08-19-08-22-43" + prometheus-config-reloader: "quay.io/openshift/origin-configmap-reloader:4.5.0" + kube_state_metrics: "quay.io/stolostron/kube-state-metrics:2.4.0-SNAPSHOT-2021-08-19-08-22-43" + node_exporter: "quay.io/stolostron/node-exporter:2.4.0-SNAPSHOT-2021-08-19-08-22-43" + kube_rbac_proxy: "quay.io/stolostron/kube-rbac-proxy:2.4.0-SNAPSHOT-2021-08-19-08-22-43" 
+ metrics_collector: "quay.io/stolostron/metrics-collector:2.4.0-SNAPSHOT-2021-08-19-08-22-43" diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/kustomization.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/kustomization.yaml new file mode 100644 index 000000000..2df72f39e --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/kustomization.yaml @@ -0,0 +1,9 @@ +resources: +- aggregate_role.yaml +- images.yaml +- observability.open-cluster-management.io_observabilityaddon_crd.yaml +- observability.open-cluster-management.io_observabilityaddon_v1beta1_crd.yaml +- operator.yaml +- role.yaml +- role_binding.yaml +- service_account.yaml diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_crd.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_crd.yaml new file mode 100644 index 000000000..2ae127b7c --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_crd.yaml @@ -0,0 +1,120 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: observabilityaddons.observability.open-cluster-management.io +spec: + group: observability.open-cluster-management.io + names: + kind: ObservabilityAddon + listKind: ObservabilityAddonList + plural: observabilityaddons + shortNames: + - oba + singular: observabilityaddon + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: ObservabilityAddon is the Schema for the observabilityaddon API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservabilityAddonSpec is the spec of observability addon + properties: + enableMetrics: + default: true + description: EnableMetrics indicates the observability addon push + metrics to hub server. + type: boolean + interval: + default: 300 + description: Interval for the observability addon push metrics to + hub server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + status: + description: ObservabilityAddonStatus defines the observed state of ObservabilityAddon + properties: + conditions: + items: + description: StatusCondition contains condition information for + an observability addon + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_v1beta1_crd.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_v1beta1_crd.yaml new file mode 100644 index 000000000..8f166d43c --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/observability.open-cluster-management.io_observabilityaddon_v1beta1_crd.yaml @@ -0,0 +1,120 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: observabilityaddons.observability.open-cluster-management.io +spec: + group: observability.open-cluster-management.io + names: + kind: ObservabilityAddon + listKind: ObservabilityAddonList + plural: observabilityaddons + shortNames: + - oba + singular: observabilityaddon + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: ObservabilityAddon is the Schema for the observabilityaddon API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ObservabilityAddonSpec is the spec of observability addon + properties: + enableMetrics: + description: EnableMetrics indicates the observability addon push metrics + to hub server. 
+ type: boolean + interval: + description: Interval for the observability addon push metrics to hub + server. + format: int32 + maximum: 3600 + minimum: 15 + type: integer + resources: + description: Resource requirement for metrics-collector + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources + required. If Requests is omitted for a container, it defaults + to Limits if that is explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + status: + description: ObservabilityAddonStatus defines the observed state of ObservabilityAddon + properties: + conditions: + items: + description: StatusCondition contains condition information for an + observability addon + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - conditions + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml new file mode 100644 index 000000000..e107c7db9 --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: endpoint-observability-operator +spec: + replicas: 1 + selector: + matchLabels: + name: endpoint-observability-operator + template: + metadata: + labels: + name: endpoint-observability-operator + spec: + serviceAccountName: endpoint-observability-operator-sa + containers: + - name: endpoint-observability-operator + # Replace this with the built image name + image: REPLACE_WITH_OPERATOR_IMAGE + command: + - endpoint-monitoring-operator + resources: + requests: + cpu: 2m + memory: 50Mi + imagePullPolicy: Always + ports: + - containerPort: 8383 + name: metrics + protocol: TCP + env: + - name: HUB_NAMESPACE + value: REPLACE_WITH_HUB_CLUSTER_NAMESPACE + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + - name: OPERATOR_NAME + value: "endpoint-monitoring-operator" + - name: HUB_KUBECONFIG + value: "/spoke/hub-kubeconfig/kubeconfig" + - name: INSTALL_PROM + value: "false" + - name: PULL_SECRET + 
value: "REPLACE_WITH_IMAGEPULLSECRET" + volumeMounts: + - name: hub-kubeconfig-secret + mountPath: "/spoke/hub-kubeconfig" + readOnly: true + volumes: + - name: hub-kubeconfig-secret + secret: + secretName: observability-controller-hub-kubeconfig diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml new file mode 100644 index 000000000..c4e7c0ad9 --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml @@ -0,0 +1,210 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:endpoint-observability-operator +rules: +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + - nodes/proxy + verbs: + - list + - watch + - get +- apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - services + - serviceaccounts + verbs: + - create + - get + - list + - watch + - delete + - update +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - observability.open-cluster-management.io + resources: + - multiclusterobservabilities + - observabilityaddons + verbs: + - list + - watch + - get +- apiGroups: + - observability.open-cluster-management.io + resources: + - observabilityaddons/status + verbs: + - get + - update +- apiGroups: + - config.openshift.io + resources: + - clusterversions + - infrastructures + verbs: + - list + - watch + - get +- apiGroups: + - work.open-cluster-management.io + resources: + - appliedmanifestworks + verbs: + - get +- apiGroups: + - work.open-cluster-management.io + resources: + - appliedmanifestworks/finalizers + verbs: + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - delete +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- nonResourceURLs: + - /metrics + - /federate + verbs: + - get +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - endpoints + - limitranges + - namespaces + - persistentvolumeclaims + - persistentvolumes + - replicationcontrollers + - resourcequotas + verbs: + - get + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - list + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - list + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + - volumeattachments + verbs: + - list + - watch +- apiGroups: + - extensions + resources: 
+ - ingresses + verbs: + - get + - list + - watch diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/role_binding.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/role_binding.yaml new file mode 100644 index 000000000..0267caa43 --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/role_binding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: open-cluster-management:endpoint-observability-operator-rb +subjects: +- kind: ServiceAccount + name: endpoint-observability-operator-sa + namespace: open-cluster-management-observability +roleRef: + kind: ClusterRole + name: open-cluster-management:endpoint-observability-operator + apiGroup: rbac.authorization.k8s.io diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/service_account.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/service_account.yaml new file mode 100644 index 000000000..4853c155e --- /dev/null +++ b/operators/multiclusterobservability/manifests/endpoint-observability/service_account.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: endpoint-observability-operator-sa +imagePullSecrets: +- name: REPLACE_WITH_IMAGEPULLSECRET diff --git a/operators/multiclusterobservability/pkg/certificates/approver.go b/operators/multiclusterobservability/pkg/certificates/approver.go new file mode 100644 index 000000000..31efe88c5 --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/approver.go @@ -0,0 +1,24 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "strings" + + certificatesv1 "k8s.io/api/certificates/v1" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func approve(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, + csr *certificatesv1.CertificateSigningRequest) bool { + if strings.HasPrefix(csr.Spec.Username, "system:open-cluster-management:"+cluster.Name) { + log.Info("CSR approved") + return true + } else { + log.Info("CSR not approved due to illegal requester", "requester", csr.Spec.Username) + return false + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/approver_test.go b/operators/multiclusterobservability/pkg/certificates/approver_test.go new file mode 100644 index 000000000..ebf667cb1 --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/approver_test.go @@ -0,0 +1,41 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "testing" + + certificatesv1 "k8s.io/api/certificates/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +const ( + clusterName = "test" +) + +func TestApprove(t *testing.T) { + cluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + } + csr := &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + Username: "system:open-cluster-management:" + clusterName, + }, + } + if !approve(cluster, nil, csr) { + t.Fatal("csr not approved automatically") + } + illCsr := &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + Username: "illegal", + }, + } + if approve(cluster, nil, illCsr) { + t.Fatal("illegal csr approved automatically") + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/cert_agent.go b/operators/multiclusterobservability/pkg/certificates/cert_agent.go new file mode 100644 index 000000000..417131611 --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/cert_agent.go @@ -0,0 +1,50 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "k8s.io/apimachinery/pkg/runtime" + + "open-cluster-management.io/addon-framework/pkg/agent" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +const ( + addonName = "observability-controller" + agentName = "observability" +) + +type ObservabilityAgent struct{} + +func (o *ObservabilityAgent) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { + return nil, nil +} + +func (o *ObservabilityAgent) GetAgentAddonOptions() agent.AgentAddonOptions { + return agent.AgentAddonOptions{ + AddonName: addonName, + Registration: &agent.RegistrationOption{ + CSRConfigurations: observabilitySignerConfigurations(), + CSRApproveCheck: approve, + PermissionConfig: func(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) error { + return nil + }, + CSRSign: sign, + }, + } +} + +func observabilitySignerConfigurations() func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig { + return func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig { + observabilityConfig := addonapiv1alpha1.RegistrationConfig{ + SignerName: "open-cluster-management.io/observability-signer", + Subject: addonapiv1alpha1.Subject{ + User: "managed-cluster-observability", + OrganizationUnits: []string{"acm"}, + }, + } + return append(agent.KubeClientSignerConfigurations(addonName, agentName)(cluster), observabilityConfig) + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/cert_agent_test.go b/operators/multiclusterobservability/pkg/certificates/cert_agent_test.go new file mode 100644 index 000000000..39339d461 --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/cert_agent_test.go @@ -0,0 +1,27 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func TestCertAgent(t *testing.T) { + agent := &ObservabilityAgent{} + agent.Manifests(nil, nil) + options := agent.GetAgentAddonOptions() + cluster := &clusterv1.ManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + } + configs := options.Registration.CSRConfigurations(cluster) + if len(configs) != 2 { + t.Fatal("Wrong CSRConfigurations") + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/cert_controller.go b/operators/multiclusterobservability/pkg/certificates/cert_controller.go new file mode 100644 index 000000000..2a3f0e3fd --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/cert_controller.go @@ -0,0 +1,237 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "os" + "reflect" + "time" + + appv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" + "open-cluster-management.io/addon-framework/pkg/addonmanager" +) + +const ( + restartLabel = "cert/time-restarted" +) + +var ( + caSecretNames = []string{serverCACerts, clientCACerts} + isCertControllerRunnning = false +) + +func Start(c client.Client, ingressCtlCrdExists bool) { + + if isCertControllerRunnning { + return + } + isCertControllerRunnning = true + + // setup ocm addon manager + addonMgr, err := addonmanager.New(ctrl.GetConfigOrDie()) + if err != nil { + log.Error(err, "Failed to init addon manager") + os.Exit(1) + } + agent := &ObservabilityAgent{} + err = addonMgr.AddAgent(agent) + if err != nil { + log.Error(err, "Failed to add agent for addon manager") + os.Exit(1) + } + + err = addonMgr.Start(context.TODO()) + if err != nil { + log.Error(err, "Failed to start addon manager") + os.Exit(1) + } + + kubeClient, err := kubernetes.NewForConfig(ctrl.GetConfigOrDie()) + if err != nil { + log.Error(err, "Failed to create kube client") + os.Exit(1) + } + watchlist := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "secrets", config.GetDefaultNamespace(), + fields.OneTermEqualSelector("metadata.namespace", config.GetDefaultNamespace())) + _, controller := cache.NewInformer( + watchlist, + &v1.Secret{}, + time.Minute*60, + cache.ResourceEventHandlerFuncs{ + AddFunc: onAdd(c), + + DeleteFunc: onDelete(c), + + UpdateFunc: onUpdate(c, ingressCtlCrdExists), + }, + ) + + stop := make(chan struct{}) + go controller.Run(stop) +} + +func restartPods(c client.Client, s v1.Secret, isUpdate bool) { + if config.GetMonitoringCRName() == "" { + return + } + dName := "" + if s.Name == config.ServerCACerts || s.Name == config.GrafanaCerts { + dName = config.GetOperandName(config.RBACQueryProxy) + } + if s.Name == config.ClientCACerts || s.Name == 
config.ServerCerts { + dName = config.GetOperandName(config.ObservatoriumAPI) + } + if dName != "" { + updateDeployLabel(c, dName, isUpdate) + } +} + +func updateDeployLabel(c client.Client, dName string, isUpdate bool) { + dep := &appv1.Deployment{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: dName, + Namespace: config.GetDefaultNamespace(), + }, dep) + if err != nil { + if !errors.IsNotFound(err) { + log.Error(err, "Failed to check the deployment", "name", dName) + } + return + } + if isUpdate || dep.Status.ReadyReplicas != 0 { + dep.Spec.Template.ObjectMeta.Labels[restartLabel] = time.Now().Format("2006-1-2.1504") + err = c.Update(context.TODO(), dep) + if err != nil { + log.Error(err, "Failed to update the deployment", "name", dName) + } else { + log.Info("Update deployment cert/restart label", "name", dName) + } + } +} + +func needsRenew(s v1.Secret) bool { + certSecretNames := []string{serverCACerts, clientCACerts, serverCerts, grafanaCerts} + if !util.Contains(certSecretNames, s.Name) { + return false + } + data := s.Data["tls.crt"] + if len(data) == 0 { + log.Info("miss cert, need to recreate", "name", s.Name) + return true + } + block, _ := pem.Decode(data) + certs, err := x509.ParseCertificates(block.Bytes) + if err != nil { + log.Error(err, "wrong certificate found, need to recreate", "name", s.Name) + return true + } + cert := certs[0] + maxWait := cert.NotAfter.Sub(cert.NotBefore) / 5 + latestTime := cert.NotAfter.Add(-maxWait) + if time.Now().After(latestTime) { + log.Info(fmt.Sprintf("certificate expired in %6.3f hours, need to renew", + time.Until(cert.NotAfter).Hours()), "secret", s.Name) + return true + } + + return false +} + +func onAdd(c client.Client) func(obj interface{}) { + return func(obj interface{}) { + restartPods(c, *obj.(*v1.Secret), false) + } +} + +func onDelete(c client.Client) func(obj interface{}) { + return func(obj interface{}) { + s := *obj.(*v1.Secret) + if util.Contains(caSecretNames, s.Name) { + mco := &mcov1beta2.MultiClusterObservability{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: config.GetMonitoringCRName(), + }, mco) + if err == nil { + log.Info("secret for ca certificate deleted by mistake, add the cert back to the new created one", "name", s.Name) + i := 0 + for { + caSecret := &v1.Secret{} + err = c.Get(context.TODO(), types.NamespacedName{ + Name: s.Name, + Namespace: config.GetDefaultNamespace(), + }, caSecret) + if err == nil { + caSecret.Data["tls.crt"] = append(caSecret.Data["tls.crt"], s.Data["tls.crt"]...) 
+ err = c.Update(context.TODO(), caSecret) + if err != nil { + log.Error(err, "Failed to update secret for ca certificate", "name", s.Name) + } + break + } else { + // wait mco operator recreate the ca certificate at most 30 seconds + if i < 6 { + time.Sleep(5 * time.Second) + i++ + } else { + log.Info("new secret for ca certificate not created") + break + } + } + } + } + } + } +} + +func onUpdate(c client.Client, ingressCtlCrdExists bool) func(oldObj, newObj interface{}) { + return func(oldObj, newObj interface{}) { + oldS := *oldObj.(*v1.Secret) + newS := *newObj.(*v1.Secret) + if !reflect.DeepEqual(oldS.Data, newS.Data) { + restartPods(c, newS, true) + } else { + if util.Contains(caSecretNames, newS.Name) { + removeExpiredCA(c, newS.Name) + } + if needsRenew(newS) { + var err error + var hosts []string + switch name := newS.Name; { + case name == serverCACerts: + err, _ = createCASecret(c, nil, nil, true, serverCACerts, serverCACertifcateCN) + case name == clientCACerts: + err, _ = createCASecret(c, nil, nil, true, clientCACerts, clientCACertificateCN) + case name == grafanaCerts: + err = createCertSecret(c, nil, nil, true, grafanaCerts, false, grafanaCertificateCN, nil, nil, nil) + case name == serverCerts: + hosts, err = getHosts(c, ingressCtlCrdExists) + if err == nil { + err = createCertSecret(c, nil, nil, true, serverCerts, true, serverCertificateCN, nil, hosts, nil) + } + default: + return + } + if err != nil { + log.Error(err, "Failed to renew the certificate", "name", newS.Name) + } + } + } + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/cert_controller_test.go b/operators/multiclusterobservability/pkg/certificates/cert_controller_test.go new file mode 100644 index 000000000..379b4c478 --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/cert_controller_test.go @@ -0,0 +1,124 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "context" + "testing" + "time" + + appv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +func init() { + //logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(os.Stdout))) + + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + config.SetMonitoringCRName(name) +} + +func newDeployment(name string) *appv1.Deployment { + return &appv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + CreationTimestamp: metav1.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC), + }, + Spec: appv1.DeploymentSpec{ + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"label": "value"}, + }, + }, + }, + Status: appv1.DeploymentStatus{ + ReadyReplicas: 1, + }, + } +} + +func TestOnAdd(t *testing.T) { + c := fake.NewFakeClient() + caSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: serverCACerts, + Namespace: namespace, + CreationTimestamp: metav1.Date(2020, time.January, 2, 0, 0, 0, 0, time.UTC), + }, + } + config.SetOperandNames(c) + onAdd(c)(caSecret) + c = fake.NewFakeClient(newDeployment(name+"-rbac-query-proxy"), + newDeployment(name+"-observatorium-api")) + onAdd(c)(caSecret) + dep := &appv1.Deployment{} + c.Get(context.TODO(), + types.NamespacedName{Name: name + "-rbac-query-proxy", Namespace: namespace}, + dep) + if dep.Spec.Template.ObjectMeta.Labels[restartLabel] == "" { + t.Fatalf("Failed to inject restart label") + } + caSecret.Name = clientCACerts + onAdd(c)(caSecret) + c.Get(context.TODO(), + types.NamespacedName{Name: name + "-observatorium-api", Namespace: namespace}, + dep) + if dep.Spec.Template.ObjectMeta.Labels[restartLabel] == "" { + t.Fatalf("Failed to inject restart label") + } +} + +func TestOnDelete(t *testing.T) { + caSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: serverCACerts, + Namespace: namespace, + }, + Data: map[string][]byte{ + "tls.crt": []byte("new cert-"), + }, + } + deletCaSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: serverCACerts, + Namespace: namespace, + }, + Data: map[string][]byte{ + "tls.crt": []byte("old cert"), + }, + } + c := fake.NewFakeClient(caSecret, getMco()) + onDelete(c)(deletCaSecret) + c.Get(context.TODO(), types.NamespacedName{Name: serverCACerts, Namespace: namespace}, caSecret) + data := string(caSecret.Data["tls.crt"]) + if data != "new cert-old cert" { + t.Fatalf("deleted cert not added back: %s", data) + } +} + +func TestOnUpdate(t *testing.T) { + certSecret := getExpiredCertSecret() + oldCertLength := len(certSecret.Data["tls.crt"]) + c := fake.NewFakeClient(certSecret) + onUpdate(c, true)(certSecret, certSecret) + certSecret.Name = clientCACerts + onUpdate(c, true)(certSecret, certSecret) + certSecret.Name = grafanaCerts + onUpdate(c, true)(certSecret, certSecret) + certSecret.Name = serverCerts + onUpdate(c, true)(certSecret, certSecret) + c.Get(context.TODO(), types.NamespacedName{Name: serverCACerts, Namespace: namespace}, certSecret) + if len(certSecret.Data["tls.crt"]) <= oldCertLength { + 
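+		// onUpdate is expected to renew the expired server CA and append the freshly
+		// issued certificate to tls.crt, so the bundle should grow past its original length.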
t.Fatal("certificate not renewed correctly") + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/certificates.go b/operators/multiclusterobservability/pkg/certificates/certificates.go new file mode 100644 index 000000000..99b4f44ac --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/certificates.go @@ -0,0 +1,442 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "bytes" + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +const ( + serverCACertifcateCN = "observability-server-ca-certificate" + serverCACerts = config.ServerCACerts + serverCertificateCN = config.ServerCertCN + serverCerts = config.ServerCerts + + clientCACertificateCN = "observability-client-ca-certificate" + clientCACerts = config.ClientCACerts + grafanaCertificateCN = config.GrafanaCN + grafanaCerts = config.GrafanaCerts +) + +var ( + log = logf.Log.WithName("controller_certificates") + serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) +) + +func CreateObservabilityCerts(c client.Client, scheme *runtime.Scheme, mco *mcov1beta2.MultiClusterObservability, ingressCtlCrdExists bool) error { + + config.SetCertDuration(mco.Annotations) + + err, serverCrtUpdated := createCASecret(c, scheme, mco, false, serverCACerts, serverCACertifcateCN) + if err != nil { + return err + } + err, clientCrtUpdated := createCASecret(c, scheme, mco, false, clientCACerts, clientCACertificateCN) + if err != nil { + return err + } + hosts, err := getHosts(c, ingressCtlCrdExists) + if err != nil { + return err + } + err = createCertSecret(c, scheme, mco, serverCrtUpdated, serverCerts, true, serverCertificateCN, nil, hosts, nil) + if err != nil { + return err + } + err = createCertSecret(c, scheme, mco, clientCrtUpdated, grafanaCerts, false, grafanaCertificateCN, nil, nil, nil) + if err != nil { + return err + } + + return nil +} + +func createCASecret(c client.Client, + scheme *runtime.Scheme, mco *mcov1beta2.MultiClusterObservability, + isRenew bool, name string, cn string) (error, bool) { + if isRenew { + log.Info("To renew CA certificates", "name", name) + } + caSecret := &corev1.Secret{} + err := c.Get(context.TODO(), types.NamespacedName{Namespace: config.GetDefaultNamespace(), Name: name}, caSecret) + if err != nil { + if !errors.IsNotFound(err) { + log.Error(err, "Failed to check ca secret", "name", name) + return err, false + } else { + key, cert, err := createCACertificate(cn, nil) + if err != nil { + return err, false + } + certPEM, keyPEM := pemEncode(cert, key) + caSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: config.GetDefaultNamespace(), + }, + Data: map[string][]byte{ + "ca.crt": certPEM.Bytes(), + "tls.crt": certPEM.Bytes(), + 
"tls.key": keyPEM.Bytes(), + }, + } + if mco != nil { + if err := controllerutil.SetControllerReference(mco, caSecret, scheme); err != nil { + return err, false + } + } + + if err := c.Create(context.TODO(), caSecret); err != nil { + log.Error(err, "Failed to create secret", "name", name) + return err, false + } else { + return nil, true + } + } + } else { + if !isRenew { + log.Info("CA secrets already existed", "name", name) + } else { + block, _ := pem.Decode(caSecret.Data["tls.key"]) + caKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + log.Error(err, "Wrong private key found, create new one", "name", name) + caKey = nil + } + key, cert, err := createCACertificate(cn, caKey) + if err != nil { + return err, false + } + certPEM, keyPEM := pemEncode(cert, key) + caSecret.Data["ca.crt"] = certPEM.Bytes() + caSecret.Data["tls.crt"] = append(certPEM.Bytes(), caSecret.Data["tls.crt"]...) + caSecret.Data["tls.key"] = keyPEM.Bytes() + if err := c.Update(context.TODO(), caSecret); err != nil { + log.Error(err, "Failed to update secret", "name", name) + return err, false + } else { + log.Info("CA certificates renewed", "name", name) + return nil, true + } + } + } + return nil, false +} + +func createCACertificate(cn string, caKey *rsa.PrivateKey) ([]byte, []byte, error) { + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + log.Error(err, "failed to generate serial number") + return nil, nil, err + } + ca := &x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{ + Organization: []string{"Red Hat, Inc."}, + Country: []string{"US"}, + CommonName: cn, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(config.GetCertDuration() * 5), + IsCA: true, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + if caKey == nil { + caKey, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Error(err, "Failed to generate private key", "cn", cn) + return nil, nil, err + } + } + + caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caKey.PublicKey, caKey) + if err != nil { + log.Error(err, "Failed to create certificate", "cn", cn) + return nil, nil, err + } + caKeyBytes := x509.MarshalPKCS1PrivateKey(caKey) + return caKeyBytes, caBytes, nil +} + +func createCertSecret(c client.Client, + scheme *runtime.Scheme, mco *mcov1beta2.MultiClusterObservability, + isRenew bool, name string, isServer bool, + cn string, ou []string, dns []string, ips []net.IP) error { + if isRenew { + log.Info("To renew certificates", "name", name) + } + crtSecret := &corev1.Secret{} + err := c.Get(context.TODO(), types.NamespacedName{Namespace: config.GetDefaultNamespace(), Name: name}, crtSecret) + if err != nil { + if !errors.IsNotFound(err) { + log.Error(err, "Failed to check certificate secret", "name", name) + return err + } else { + caCert, caKey, caCertBytes, err := getCA(c, isServer) + if err != nil { + return err + } + key, cert, err := createCertificate(isServer, cn, ou, dns, ips, caCert, caKey, nil) + if err != nil { + return err + } + certPEM, keyPEM := pemEncode(cert, key) + crtSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: config.GetDefaultNamespace(), + }, + Data: map[string][]byte{ + "ca.crt": caCertBytes, + "tls.crt": certPEM.Bytes(), + "tls.key": keyPEM.Bytes(), + }, + } + if mco != nil { + if err := controllerutil.SetControllerReference(mco, crtSecret, scheme); err != nil { + return err + } + } + err = c.Create(context.TODO(), 
crtSecret) + if err != nil { + log.Error(err, "Failed to create secret", "name", name) + return err + } + } + } else { + if crtSecret.Name == serverCerts && !isRenew { + block, _ := pem.Decode(crtSecret.Data["tls.crt"]) + if block == nil || block.Bytes == nil { + log.Info("Empty block in server certificate, skip") + } else { + serverCrt, err := x509.ParseCertificate(block.Bytes) + if err != nil { + log.Error(err, "Failed to parse the server certificate, renew it") + isRenew = true + } + // to handle upgrade scenario in which hosts maybe update + for _, dnsString := range dns { + if !util.Contains(serverCrt.DNSNames, dnsString) { + isRenew = true + break + } + } + } + } + + if !isRenew { + log.Info("Certificate secrets already existed", "name", name) + } else { + caCert, caKey, caCertBytes, err := getCA(c, isServer) + if err != nil { + return err + } + block, _ := pem.Decode(crtSecret.Data["tls.key"]) + crtkey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + log.Error(err, "Wrong private key found, create new one", "name", name) + crtkey = nil + } + key, cert, err := createCertificate(isServer, cn, ou, dns, ips, caCert, caKey, crtkey) + if err != nil { + return err + } + certPEM, keyPEM := pemEncode(cert, key) + crtSecret.Data["ca.crt"] = caCertBytes + crtSecret.Data["tls.crt"] = certPEM.Bytes() + crtSecret.Data["tls.key"] = keyPEM.Bytes() + if err := c.Update(context.TODO(), crtSecret); err != nil { + log.Error(err, "Failed to update secret", "name", name) + return err + } else { + log.Info("Certificates renewed", "name", name) + } + } + } + return nil +} + +func createCertificate(isServer bool, cn string, ou []string, dns []string, ips []net.IP, + caCert *x509.Certificate, caKey *rsa.PrivateKey, key *rsa.PrivateKey) ([]byte, []byte, error) { + sn, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + log.Error(err, "failed to generate serial number") + return nil, nil, err + } + + cert := &x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{ + Organization: []string{"Red Hat, Inc."}, + Country: []string{"US"}, + CommonName: cn, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(config.GetCertDuration()), + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + } + if !isServer { + cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} + } + if ou != nil { + cert.Subject.OrganizationalUnit = ou + } + if dns != nil { + dns = append(dns[:1], dns[0:]...) 
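+		// The append above is the usual slice-insert idiom: it grows dns by one and shifts
+		// the existing names right, so the assignment below can put the CN first in the SAN list.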
+ dns[0] = cn + cert.DNSNames = dns + } else { + cert.DNSNames = []string{cn} + } + if ips != nil { + cert.IPAddresses = ips + } + + if key == nil { + key, err = rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Error(err, "Failed to generate private key", "cn", cn) + return nil, nil, err + } + } + + caBytes, err := x509.CreateCertificate(rand.Reader, cert, caCert, &key.PublicKey, caKey) + if err != nil { + log.Error(err, "Failed to create certificate", "cn", cn) + return nil, nil, err + } + keyBytes := x509.MarshalPKCS1PrivateKey(key) + return keyBytes, caBytes, nil +} + +func getCA(c client.Client, isServer bool) (*x509.Certificate, *rsa.PrivateKey, []byte, error) { + caCertName := serverCACerts + if !isServer { + caCertName = clientCACerts + } + caSecret := &corev1.Secret{} + err := c.Get(context.TODO(), types.NamespacedName{Namespace: config.GetDefaultNamespace(), Name: caCertName}, caSecret) + if err != nil { + log.Error(err, "Failed to get ca secret", "name", caCertName) + return nil, nil, nil, err + } + block1, rest := pem.Decode(caSecret.Data["tls.crt"]) + caCertBytes := caSecret.Data["tls.crt"][:len(caSecret.Data["tls.crt"])-len(rest)] + caCerts, err := x509.ParseCertificates(block1.Bytes) + if err != nil { + log.Error(err, "Failed to parse ca cert", "name", caCertName) + return nil, nil, nil, err + } + block2, _ := pem.Decode(caSecret.Data["tls.key"]) + caKey, err := x509.ParsePKCS1PrivateKey(block2.Bytes) + if err != nil { + log.Error(err, "Failed to parse ca key", "name", caCertName) + return nil, nil, nil, err + } + return caCerts[0], caKey, caCertBytes, nil +} + +func removeExpiredCA(c client.Client, name string) { + caSecret := &corev1.Secret{} + err := c.Get(context.TODO(), types.NamespacedName{Namespace: config.GetDefaultNamespace(), Name: name}, caSecret) + if err != nil { + log.Error(err, "Failed to get ca secret", "name", name) + return + } + data := caSecret.Data["tls.crt"] + _, restData := pem.Decode(data) + caSecret.Data["tls.crt"] = data[:len(data)-len(restData)] + if len(restData) > 0 { + for { + var block *pem.Block + index := len(data) - len(restData) + block, restData = pem.Decode(restData) + certs, err := x509.ParseCertificates(block.Bytes) + removeFlag := false + if err != nil { + log.Error(err, "Find wrong cert bytes, needs to remove it", "name", name) + removeFlag = true + } else { + if time.Now().After(certs[0].NotAfter) { + log.Info("CA certificate expired, needs to remove it", "name", name) + removeFlag = true + } + } + if !removeFlag { + caSecret.Data["tls.crt"] = append(caSecret.Data["tls.crt"], data[index:len(data)-len(restData)]...) 
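+				// This block parsed cleanly and has not expired, so its original PEM bytes are
+				// kept; only expired or malformed blocks are dropped from the bundle.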
+ } + if len(restData) == 0 { + break + } + } + } + if len(data) != len(caSecret.Data["tls.crt"]) { + err = c.Update(context.TODO(), caSecret) + if err != nil { + log.Error(err, "Failed to update ca secret to removed expired ca", "name", name) + } else { + log.Info("Expired certificates are removed", "name", name) + } + } +} + +func pemEncode(cert []byte, key []byte) (*bytes.Buffer, *bytes.Buffer) { + certPEM := new(bytes.Buffer) + err := pem.Encode(certPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert, + }) + if err != nil { + log.Error(err, "Failed to encode cert") + } + + keyPEM := new(bytes.Buffer) + err = pem.Encode(keyPEM, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: key, + }) + if err != nil { + log.Error(err, "Failed to encode key") + } + + return certPEM, keyPEM +} + +func getHosts(c client.Client, ingressCtlCrdExists bool) ([]string, error) { + hosts := []string{config.GetObsAPISvc(config.GetOperandName(config.Observatorium))} + if ingressCtlCrdExists { + url, err := config.GetObsAPIHost(c, config.GetDefaultNamespace()) + if err != nil { + log.Error(err, "Failed to get api route address") + return nil, err + } else { + hosts = append(hosts, url) + } + } + return hosts, nil +} diff --git a/operators/multiclusterobservability/pkg/certificates/certificates_test.go b/operators/multiclusterobservability/pkg/certificates/certificates_test.go new file mode 100644 index 000000000..64d7d92bf --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/certificates_test.go @@ -0,0 +1,119 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "testing" + "time" + + routev1 "github.com/openshift/api/route/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +var ( + name = "observability" + namespace = mcoconfig.GetDefaultNamespace() +) + +func getMco() *mcov1beta2.MultiClusterObservability { + return &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: mcov1beta2.MultiClusterObservabilitySpec{}, + } +} + +func getExpiredCertSecret() *v1.Secret { + date := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC) + ca := &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Country: []string{"US"}, + }, + NotBefore: date, + NotAfter: date.AddDate(1, 0, 0), + IsCA: true, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageCertSign, + } + caKey, _ := rsa.GenerateKey(rand.Reader, 2048) + caBytes, _ := x509.CreateCertificate(rand.Reader, ca, ca, &caKey.PublicKey, caKey) + certPEM, keyPEM := pemEncode(caBytes, x509.MarshalPKCS1PrivateKey(caKey)) + caSecret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: serverCACerts, + Namespace: namespace, + }, + Data: map[string][]byte{ + "ca.crt": caBytes, + "tls.crt": append(certPEM.Bytes(), certPEM.Bytes()...), + "tls.key": keyPEM.Bytes(), + }, + } + return caSecret +} + +func TestCreateCertificates(t 
*testing.T) { + route := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: "observatorium-api", + Namespace: namespace, + }, + Spec: routev1.RouteSpec{ + Host: "apiServerURL", + }, + } + mco := getMco() + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + routev1.AddToScheme(s) + + c := fake.NewFakeClient(route) + + err := CreateObservabilityCerts(c, s, mco, true) + if err != nil { + t.Fatalf("CreateObservabilityCerts: (%v)", err) + } + + err = CreateObservabilityCerts(c, s, mco, true) + if err != nil { + t.Fatalf("Rerun CreateObservabilityCerts: (%v)", err) + } + + err, _ = createCASecret(c, s, mco, true, serverCACerts, serverCACertifcateCN) + if err != nil { + t.Fatalf("Failed to renew server ca certificates: (%v)", err) + } + + err = createCertSecret(c, s, mco, true, grafanaCerts, false, grafanaCertificateCN, nil, nil, nil) + if err != nil { + t.Fatalf("Failed to renew server certificates: (%v)", err) + } +} + +func TestRemoveExpiredCA(t *testing.T) { + + caSecret := getExpiredCertSecret() + oldCertLength := len(caSecret.Data["tls.crt"]) + c := fake.NewFakeClient(caSecret) + removeExpiredCA(c, serverCACerts) + c.Get(context.TODO(), + types.NamespacedName{Name: serverCACerts, Namespace: namespace}, + caSecret) + if len(caSecret.Data["tls.crt"]) != oldCertLength/2 { + t.Fatal("Expired certificate not removed correctly") + } +} diff --git a/operators/multiclusterobservability/pkg/certificates/signer.go b/operators/multiclusterobservability/pkg/certificates/signer.go new file mode 100644 index 000000000..5b2d182b8 --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/signer.go @@ -0,0 +1,94 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "errors" + "os" + "time" + + "github.com/cloudflare/cfssl/config" + "github.com/cloudflare/cfssl/signer" + "github.com/cloudflare/cfssl/signer/local" + certificatesv1 "k8s.io/api/certificates/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func getClient(s *runtime.Scheme) (client.Client, error) { + if os.Getenv("TEST") != "" { + c := fake.NewFakeClient() + return c, nil + } + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + return nil, errors.New("failed to create the kube config") + } + options := client.Options{} + if s != nil { + options = client.Options{Scheme: s} + } + c, err := client.New(config, options) + if err != nil { + return nil, errors.New("failed to create the kube client") + } + return c, nil +} + +func sign(csr *certificatesv1.CertificateSigningRequest) []byte { + c, err := getClient(nil) + if err != nil { + log.Error(err, err.Error()) + return nil + } + if os.Getenv("TEST") != "" { + err, _ := createCASecret(c, nil, nil, false, clientCACerts, clientCACertificateCN) + if err != nil { + log.Error(err, "Failed to create CA") + } + } + caCert, caKey, _, err := getCA(c, false) + if err != nil { + return nil + } + + var usages []string + for _, usage := range csr.Spec.Usages { + usages = append(usages, string(usage)) + } + + certExpiryDuration := 365 * 24 * time.Hour + durationUntilExpiry := time.Until(caCert.NotAfter) + if durationUntilExpiry <= 0 { + log.Error(errors.New("signer has expired"), "the signer has expired", "expired time", caCert.NotAfter) + return nil + } + if durationUntilExpiry < certExpiryDuration { + certExpiryDuration = durationUntilExpiry 
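+		// Cap the new certificate's lifetime at the signing CA's remaining lifetime: for
+		// example, a CA with 90 days left yields a 90-day certificate instead of the default 365.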
+ } + + policy := &config.Signing{ + Default: &config.SigningProfile{ + Usage: usages, + Expiry: certExpiryDuration, + ExpiryString: certExpiryDuration.String(), + }, + } + cfs, err := local.NewSigner(caKey, caCert, signer.DefaultSigAlgo(caKey), policy) + if err != nil { + log.Error(err, "Failed to create new local signer") + return nil + } + + signedCert, err := cfs.Sign(signer.SignRequest{ + Request: string(csr.Spec.Request), + }) + if err != nil { + log.Error(err, "Failed to sign the CSR") + return nil + } + return signedCert +} diff --git a/operators/multiclusterobservability/pkg/certificates/signer_test.go b/operators/multiclusterobservability/pkg/certificates/signer_test.go new file mode 100644 index 000000000..d20b476df --- /dev/null +++ b/operators/multiclusterobservability/pkg/certificates/signer_test.go @@ -0,0 +1,49 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package certificates + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "os" + "testing" + + certificatesv1 "k8s.io/api/certificates/v1" +) + +func init() { + os.Setenv("TEST", "true") +} + +func createCSR() []byte { + keys, _ := rsa.GenerateKey(rand.Reader, 2048) + + var csrTemplate = x509.CertificateRequest{ + Subject: pkix.Name{ + Country: []string{"US"}, + }, + SignatureAlgorithm: x509.SHA512WithRSA, + } + csrCertificate, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, keys) + csr := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", Bytes: csrCertificate, + }) + return csr +} + +func TestSign(t *testing.T) { + csr := &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + Request: createCSR(), + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageCertSign, certificatesv1.UsageClientAuth}, + }, + } + + if sign(csr) == nil { + t.Fatal("Failed to sign CSR") + } +} diff --git a/operators/multiclusterobservability/pkg/config/azure_conf.go b/operators/multiclusterobservability/pkg/config/azure_conf.go new file mode 100644 index 000000000..7102f9852 --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/azure_conf.go @@ -0,0 +1,52 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "errors" + "strings" + + "gopkg.in/yaml.v2" +) + +func validateAzure(conf Config) error { + + if conf.StorageAccount == "" { + return errors.New("no storage_account as azure storage account in config file") + } + + if conf.StorageAccountKey == "" { + return errors.New("no storage_account_key as azure storage account key in config file") + } + + if conf.Container == "" { + return errors.New("no container as azure container in config file") + } + + if conf.Endpoint == "" { + return errors.New("no endpoint as azure endpoint in config file") + } + + return nil +} + +// IsValidAzureConf is used to validate azure configuration +func IsValidAzureConf(data []byte) (bool, error) { + var objectConfg ObjectStorgeConf + err := yaml.Unmarshal(data, &objectConfg) + if err != nil { + return false, err + } + + if strings.ToLower(objectConfg.Type) != "azure" { + return false, errors.New("invalid type config, only azure type is supported") + } + + err = validateAzure(objectConfg.Config) + if err != nil { + return false, err + } + + return true, nil +} diff --git a/operators/multiclusterobservability/pkg/config/config.go b/operators/multiclusterobservability/pkg/config/config.go new file mode 100644 index 000000000..491cf9d53 --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/config.go @@ -0,0 +1,1089 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + ocinfrav1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" + ocpClientSet "github.com/openshift/client-go/config/clientset/versioned" + obsv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + observabilityv1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" +) + +const ( + crLabelKey = "observability.open-cluster-management.io/name" + clusterNameLabelKey = "cluster" + obsAPIGateway = "observatorium-api" + infrastructureConfigName = "cluster" + defaultMCONamespace = "open-cluster-management" + defaultNamespace = "open-cluster-management-observability" + defaultTenantName = "default" + defaultCRName = "observability" + operandNamePrefix = "observability-" + OpenshiftIngressOperatorNamespace = "openshift-ingress-operator" + OpenshiftIngressNamespace = "openshift-ingress" + OpenshiftIngressOperatorCRName = "default" + OpenshiftIngressDefaultCertName = "router-certs-default" + OpenshiftIngressRouteCAName = "router-ca" + + AnnotationKeyImageRepository = "mco-imageRepository" + AnnotationKeyImageTagSuffix = "mco-imageTagSuffix" + AnnotationMCOPause = "mco-pause" + AnnotationMCOWithoutResourcesRequests = "mco-thanos-without-resources-requests" + AnnotationCertDuration = "mco-cert-duration" + + MCHUpdatedRequestName = "mch-updated-request" + MCOUpdatedRequestName = 
"mco-updated-request" + ImageManifestConfigMapNamePrefix = "mch-image-manifest-" + OCMManifestConfigMapTypeLabelKey = "ocm-configmap-type" + OCMManifestConfigMapTypeLabelValue = "image-manifest" + OCMManifestConfigMapVersionLabelKey = "ocm-release-version" + + ComponentVersion = "COMPONENT_VERSION" + + ServerCACerts = "observability-server-ca-certs" + ClientCACerts = "observability-client-ca-certs" + ServerCerts = "observability-server-certs" + ServerCertCN = "observability-server-certificate" + GrafanaCerts = "observability-grafana-certs" + GrafanaCN = "grafana" + ManagedClusterOU = "acm" + + AlertmanagerAccessorSAName = "observability-alertmanager-accessor" + /* #nosec */ + AlertmanagerAccessorSecretName = "observability-alertmanager-accessor" + AlertmanagerServiceName = "alertmanager" + AlertmanagerRouteName = "alertmanager" + AlertmanagerRouteBYOCAName = "alertmanager-byo-ca" + AlertmanagerRouteBYOCERTName = "alertmanager-byo-cert" + + AlertRuleDefaultConfigMapName = "thanos-ruler-default-rules" + AlertRuleDefaultFileKey = "default_rules.yaml" + AlertRuleCustomConfigMapName = "thanos-ruler-custom-rules" + AlertRuleCustomFileKey = "custom_rules.yaml" + AlertmanagerURL = "http://alertmanager:9093" + AlertmanagerConfigName = "alertmanager-config" + + AlertmanagersDefaultConfigMapName = "thanos-ruler-config" + AlertmanagersDefaultConfigFileKey = "config.yaml" + AlertmanagersDefaultCaBundleMountPath = "/etc/thanos/configmaps/alertmanager-ca-bundle" + AlertmanagersDefaultCaBundleName = "alertmanager-ca-bundle" + AlertmanagersDefaultCaBundleKey = "service-ca.crt" + + AllowlistCustomConfigMapName = "observability-metrics-custom-allowlist" + + ProxyServiceName = "rbac-query-proxy" + ProxyRouteName = "rbac-query-proxy" + ProxyRouteBYOCAName = "proxy-byo-ca" + ProxyRouteBYOCERTName = "proxy-byo-cert" + + ValidatingWebhookConfigurationName = "multicluster-observability-operator" + WebhookServiceName = "multicluster-observability-webhook-service" +) + +const ( + DefaultImgRepository = "quay.io/stolostron" + DefaultImgTagSuffix = "2.4.0-SNAPSHOT-2021-09-23-07-02-14" + + ObservatoriumImgRepo = "quay.io/observatorium" + ObservatoriumAPIImgName = "observatorium" + ObservatoriumOperatorImgName = "observatorium-operator" + ObservatoriumOperatorImgKey = "observatorium_operator" + ThanosReceiveControllerImgName = "thanos-receive-controller" + //ThanosReceiveControllerKey is used to get from mch-image-manifest.xxx configmap + ThanosReceiveControllerKey = "thanos_receive_controller" + ThanosReceiveControllerImgTag = "master-2021-04-28-ee165b6" + ThanosImgName = "thanos" + + MemcachedImgRepo = "quay.io/ocm-observability" + MemcachedImgName = "memcached" + MemcachedImgTag = "1.6.3-alpine" + + MemcachedExporterImgRepo = "quay.io/prometheus" + MemcachedExporterImgName = "memcached-exporter" + MemcachedExporterKey = "memcached_exporter" + MemcachedExporterImgTag = "v0.9.0" + + GrafanaImgKey = "grafana" + GrafanaDashboardLoaderName = "grafana-dashboard-loader" + GrafanaDashboardLoaderKey = "grafana_dashboard_loader" + + AlertManagerImgName = "prometheus-alertmanager" + AlertManagerImgKey = "prometheus_alertmanager" + ConfigmapReloaderImgRepo = "quay.io/openshift" + ConfigmapReloaderImgName = "origin-configmap-reloader" + ConfigmapReloaderImgTagSuffix = "4.8.0" + ConfigmapReloaderKey = "prometheus-config-reloader" + + OauthProxyImgRepo = "quay.io/stolostron" + OauthProxyImgName = "origin-oauth-proxy" + OauthProxyImgTagSuffix = "2.0.12-SNAPSHOT-2021-06-11-19-40-10" + OauthProxyKey = "oauth_proxy" + + 
EndpointControllerImgName = "endpoint-monitoring-operator" + EndpointControllerKey = "endpoint_monitoring_operator" + + RBACQueryProxyImgName = "rbac-query-proxy" + RBACQueryProxyKey = "rbac_query_proxy" + + RBACQueryProxyCPURequets = "20m" + RBACQueryProxyMemoryRequets = "100Mi" + + GrafanaCPURequets = "4m" + GrafanaMemoryRequets = "100Mi" + GrafanaCPULimits = "500m" + GrafanaMemoryLimits = "1Gi" + + AlertmanagerCPURequets = "4m" + AlertmanagerMemoryRequets = "200Mi" + + ObservatoriumAPICPURequets = "20m" + ObservatoriumAPIMemoryRequets = "128Mi" + + ThanosQueryFrontendCPURequets = "100m" + ThanosQueryFrontendMemoryRequets = "256Mi" + + MemcachedExporterCPURequets = "5m" + MemcachedExporterMemoryRequets = "50Mi" + + ThanosQueryCPURequets = "300m" + ThanosQueryMemoryRequets = "1Gi" + + ThanosCompactCPURequets = "100m" + ThanosCompactMemoryRequets = "512Mi" + + ObservatoriumReceiveControllerCPURequets = "4m" + ObservatoriumReceiveControllerMemoryRequets = "32Mi" + + ThanosReceiveCPURequets = "300m" + ThanosReceiveMemoryRequets = "512Mi" + + ThanosRuleCPURequets = "50m" + ThanosRuleMemoryRequets = "512Mi" + ThanosRuleReloaderCPURequets = "4m" + ThanosRuleReloaderMemoryRequets = "25Mi" + + ThanosCachedCPURequets = "45m" + ThanosCachedMemoryRequets = "128Mi" + ThanosCachedExporterCPURequets = "5m" + ThanosCachedExporterMemoryRequets = "50Mi" + + ThanosStoreCPURequets = "100m" + ThanosStoreMemoryRequets = "1Gi" + + MetricsCollectorCPURequets = "10m" + MetricsCollectorMemoryRequets = "100Mi" + MetricsCollectorCPULimits = "" + MetricsCollectorMemoryLimits = "" + + ObservatoriumAPI = "observatorium-api" + ThanosCompact = "thanos-compact" + ThanosQuery = "thanos-query" + ThanosQueryFrontend = "thanos-query-frontend" + ThanosQueryFrontendMemcached = "thanos-query-frontend-memcached" + ThanosRule = "thanos-rule" + ThanosReceive = "thanos-receive-default" + ThanosStoreMemcached = "thanos-store-memcached" + ThanosStoreShard = "thanos-store-shard" + MemcachedExporter = "memcached-exporter" + Grafana = "grafana" + RBACQueryProxy = "rbac-query-proxy" + Alertmanager = "alertmanager" + ThanosReceiveController = "thanos-receive-controller" + ObservatoriumOperator = "observatorium-operator" + MetricsCollector = "metrics-collector" + Observatorium = "observatorium" + + RetentionResolutionRaw = "30d" + RetentionResolution5m = "180d" + RetentionResolution1h = "0d" + RetentionInLocal = "24h" + DeleteDelay = "48h" + BlockDuration = "2h" + + DefaultImagePullPolicy = "Always" + DefaultImagePullSecret = "multiclusterhub-operator-pull-secret" + + ResourceLimits = "limits" + ResourceRequests = "requests" +) + +const ( + MCORsName = "multiclusterobservabilities" +) + +const ( + IngressControllerCRD = "ingresscontrollers.operator.openshift.io" + MCHCrdName = "multiclusterhubs.operator.open-cluster-management.io" + MCOCrdName = "multiclusterobservabilities.observability.open-cluster-management.io" + StorageVersionMigrationCrdName = "storageversionmigrations.migration.k8s.io" +) + +// ObjectStorgeConf is used to Unmarshal from bytes to do validation +type ObjectStorgeConf struct { + Type string `yaml:"type"` + Config Config `yaml:"config"` +} + +var ( + log = logf.Log.WithName("config") + monitoringCRName = "" + tenantUID = "" + imageManifests = map[string]string{} + imageManifestConfigMapName = "" + hasCustomRuleConfigMap = false + hasCustomAlertmanagerConfig = false + certDuration = time.Hour * 24 * 365 + + Replicas1 int32 = 1 + Replicas2 int32 = 2 + Replicas3 int32 = 3 + Replicas = map[string]*int32{ + ObservatoriumAPI: 
&Replicas2, + ThanosQuery: &Replicas2, + ThanosQueryFrontend: &Replicas2, + Grafana: &Replicas2, + RBACQueryProxy: &Replicas2, + + ThanosRule: &Replicas3, + ThanosReceive: &Replicas3, + ThanosStoreShard: &Replicas3, + ThanosStoreMemcached: &Replicas3, + ThanosQueryFrontendMemcached: &Replicas3, + Alertmanager: &Replicas3, + } + // use this map to store the operand name + operandNames = map[string]string{} + + MemoryLimitMB = int32(1024) + ConnectionLimit = int32(1024) + MaxItemSize = "1m" +) + +func GetReplicas(component string, advanced *observabilityv1beta2.AdvancedConfig) *int32 { + if advanced == nil { + return Replicas[component] + } + var replicas *int32 + switch component { + case ObservatoriumAPI: + if advanced.ObservatoriumAPI != nil { + replicas = advanced.ObservatoriumAPI.Replicas + } + case ThanosQuery: + if advanced.Query != nil { + replicas = advanced.Query.Replicas + } + case ThanosQueryFrontend: + if advanced.QueryFrontend != nil { + replicas = advanced.QueryFrontend.Replicas + } + case ThanosQueryFrontendMemcached: + if advanced.QueryFrontendMemcached != nil { + replicas = advanced.QueryFrontendMemcached.CommonSpec.Replicas + } + case ThanosRule: + if advanced.Rule != nil { + replicas = advanced.Rule.Replicas + } + case ThanosReceive: + if advanced.Receive != nil { + replicas = advanced.Receive.Replicas + } + case ThanosStoreMemcached: + if advanced.StoreMemcached != nil { + replicas = advanced.StoreMemcached.CommonSpec.Replicas + } + case ThanosStoreShard: + if advanced.Store != nil { + replicas = advanced.Store.Replicas + } + case RBACQueryProxy: + if advanced.RBACQueryProxy != nil { + replicas = advanced.RBACQueryProxy.Replicas + } + case Grafana: + if advanced.Grafana != nil { + replicas = advanced.Grafana.Replicas + } + case Alertmanager: + if advanced.Alertmanager != nil { + replicas = advanced.Alertmanager.Replicas + } + } + if replicas == nil || *replicas == 0 { + replicas = Replicas[component] + } + return replicas +} + +// GetCrLabelKey returns the key for the CR label injected into the resources created by the operator +func GetCrLabelKey() string { + return crLabelKey +} + +// GetClusterNameLabelKey returns the key for the injected label +func GetClusterNameLabelKey() string { + return clusterNameLabelKey +} + +func GetImageManifestConfigMapName() string { + return imageManifestConfigMapName +} + +// ReadImageManifestConfigMap reads configmap with the label ocm-configmap-type=image-manifest +func ReadImageManifestConfigMap(c client.Client, version string) (bool, error) { + mcoNamespace := GetMCONamespace() + // List image manifest configmap with label ocm-configmap-type=image-manifest and ocm-release-version + matchLabels := map[string]string{ + OCMManifestConfigMapTypeLabelKey: OCMManifestConfigMapTypeLabelValue, + OCMManifestConfigMapVersionLabelKey: version, + } + listOpts := []client.ListOption{ + client.InNamespace(mcoNamespace), + client.MatchingLabels(matchLabels), + } + + imageCMList := &corev1.ConfigMapList{} + err := c.List(context.TODO(), imageCMList, listOpts...) + if err != nil { + return false, fmt.Errorf("Failed to list mch-image-manifest configmaps: %v", err) + } + + if len(imageCMList.Items) != 1 { + // there should be only one matched image manifest configmap found + return false, nil + } + + imageManifests = imageCMList.Items[0].Data + log.V(1).Info("the length of mch-image-manifest configmap", "imageManifests", len(imageManifests)) + return true, nil +} + +// GetImageManifests... 
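+// returns the image manifest map, as read from the mch-image-manifest configmap
+// (or set directly through SetImageManifests).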
+func GetImageManifests() map[string]string { + return imageManifests +} + +// SetImageManifests sets imageManifests +func SetImageManifests(images map[string]string) { + imageManifests = images +} + +// ReplaceImage is used to replace the image with specified annotation or imagemanifest configmap +func ReplaceImage(annotations map[string]string, imageRepo, componentName string) (bool, string) { + if annotations != nil { + annotationImageRepo, _ := annotations[AnnotationKeyImageRepository] + if annotationImageRepo == "" { + annotationImageRepo = DefaultImgRepository + } + // This is for test only. e.g.: + // if there is "mco-metrics_collector-image" defined in annotation, use it for testing + componentImage, hasComponentImage := annotations["mco-"+componentName+"-image"] + tagSuffix, hasTagSuffix := annotations[AnnotationKeyImageTagSuffix] + sameOrg := strings.Contains(imageRepo, DefaultImgRepository) + + if hasComponentImage { + return true, componentImage + } else if hasTagSuffix && sameOrg { + repoSlice := strings.Split(imageRepo, "/") + imageName := strings.Split(repoSlice[len(repoSlice)-1], ":")[0] + image := annotationImageRepo + "/" + imageName + ":" + tagSuffix + log.V(1).Info("image replacement", "componentName", image) + return true, image + } else if !hasTagSuffix { + image, found := imageManifests[componentName] + log.V(1).Info("image replacement", "componentName", image) + if found { + return true, image + } + return false, "" + } + return false, "" + } else { + image, found := imageManifests[componentName] + log.V(1).Info("image replacement", "componentName", image) + if found { + return true, image + } + return false, "" + } +} + +// GetDefaultTenantName returns the default tenant name +func GetDefaultTenantName() string { + return defaultTenantName +} + +// GetObsAPIHost is used to get the URL for observartium api gateway +func GetObsAPIHost(client client.Client, namespace string) (string, error) { + found := &routev1.Route{} + + err := client.Get(context.TODO(), types.NamespacedName{Name: obsAPIGateway, Namespace: namespace}, found) + if err != nil && errors.IsNotFound(err) { + // if the observatorium-api router is not created yet, fallback to get host from the domain of ingresscontroller + domain, err := getDomainForIngressController(client, OpenshiftIngressOperatorCRName, OpenshiftIngressOperatorNamespace) + if err != nil { + return "", nil + } + return obsAPIGateway + "-" + namespace + "." + domain, nil + } else if err != nil { + return "", err + } + return found.Spec.Host, nil +} + +func GetMCONamespace() string { + podNamespace, found := os.LookupEnv("POD_NAMESPACE") + if !found { + podNamespace = defaultMCONamespace + } + return podNamespace +} + +// GetAlertmanagerEndpoint is used to get the URL for alertmanager +func GetAlertmanagerEndpoint(client client.Client, namespace string) (string, error) { + found := &routev1.Route{} + + err := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteName, Namespace: namespace}, found) + if err != nil && errors.IsNotFound(err) { + // if the alertmanager router is not created yet, fallback to get host from the domain of ingresscontroller + domain, err := getDomainForIngressController(client, OpenshiftIngressOperatorCRName, OpenshiftIngressOperatorNamespace) + if err != nil { + return "", nil + } + return AlertmanagerRouteName + "-" + namespace + "." 
+ domain, nil + } else if err != nil { + return "", err + } + return found.Spec.Host, nil +} + +// getDomainForIngressController get the domain for the given ingresscontroller instance +func getDomainForIngressController(client client.Client, name, namespace string) (string, error) { + ingressOperatorInstance := &operatorv1.IngressController{} + err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, ingressOperatorInstance) + if err != nil { + return "", err + } + domain := ingressOperatorInstance.Status.Domain + if domain == "" { + return "", fmt.Errorf("no domain found in the ingressOperator: %s/%s.", namespace, name) + } + return domain, nil +} + +// GetAlertmanagerRouterCA is used to get the CA of openshift Route +func GetAlertmanagerRouterCA(client client.Client) (string, error) { + amRouteBYOCaSrt := &corev1.Secret{} + amRouteBYOCertSrt := &corev1.Secret{} + err1 := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteBYOCAName, Namespace: GetDefaultNamespace()}, amRouteBYOCaSrt) + err2 := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteBYOCERTName, Namespace: GetDefaultNamespace()}, amRouteBYOCertSrt) + if err1 == nil && err2 == nil { + return string(amRouteBYOCaSrt.Data["tls.crt"]), nil + } + + ingressOperator := &operatorv1.IngressController{} + err := client.Get(context.TODO(), types.NamespacedName{Name: OpenshiftIngressOperatorCRName, Namespace: OpenshiftIngressOperatorNamespace}, ingressOperator) + if err != nil { + return "", err + } + + routerCASrtName := OpenshiftIngressDefaultCertName + // check if custom default certificate is provided or not + if ingressOperator.Spec.DefaultCertificate != nil { + routerCASrtName = ingressOperator.Spec.DefaultCertificate.Name + } + + routerCASecret := &corev1.Secret{} + err = client.Get(context.TODO(), types.NamespacedName{Name: routerCASrtName, Namespace: OpenshiftIngressNamespace}, routerCASecret) + if err != nil { + return "", err + } + return string(routerCASecret.Data["tls.crt"]), nil +} + +// GetAlertmanagerCA is used to get the CA of Alertmanager +func GetAlertmanagerCA(client client.Client) (string, error) { + amCAConfigmap := &corev1.ConfigMap{} + err := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagersDefaultCaBundleName, Namespace: GetDefaultNamespace()}, amCAConfigmap) + if err != nil { + return "", err + } + return string(amCAConfigmap.Data["service-ca.crt"]), nil +} + +func GetDefaultNamespace() string { + return defaultNamespace +} + +// GetMonitoringCRName returns monitoring cr name +func GetMonitoringCRName() string { + return monitoringCRName +} + +// SetMonitoringCRName sets the cr name +func SetMonitoringCRName(crName string) { + monitoringCRName = crName +} + +func infrastructureConfigNameNsN() types.NamespacedName { + return types.NamespacedName{ + Name: infrastructureConfigName, + } +} + +// GetKubeAPIServerAddress is used to get the api server url +func GetKubeAPIServerAddress(client client.Client) (string, error) { + infraConfig := &ocinfrav1.Infrastructure{} + if err := client.Get(context.TODO(), infrastructureConfigNameNsN(), infraConfig); err != nil { + return "", err + } + + return infraConfig.Status.APIServerURL, nil +} + +// GetClusterID is used to get the cluster uid +func GetClusterID(ocpClient ocpClientSet.Interface) (string, error) { + clusterVersion, err := ocpClient.ConfigV1().ClusterVersions().Get(context.TODO(), "version", v1.GetOptions{}) + if err != nil { + log.Error(err, "Failed to get clusterVersion") + 
return "", err + } + + return string(clusterVersion.Spec.ClusterID), nil +} + +// checkIsIBMCloud detects if the current cloud vendor is ibm or not +// we know we are on OCP already, so if it's also ibm cloud, it's roks +func CheckIsIBMCloud(c client.Client) (bool, error) { + nodes := &corev1.NodeList{} + err := c.List(context.TODO(), nodes) + if err != nil { + log.Error(err, "Failed to get nodes list") + return false, err + } + if len(nodes.Items) == 0 { + log.Error(err, "Failed to list any nodes") + return false, nil + } + + providerID := nodes.Items[0].Spec.ProviderID + if strings.Contains(providerID, "ibm") { + return true, nil + } + + return false, nil +} + +// GetDefaultCRName is used to get default CR name. +func GetDefaultCRName() string { + return defaultCRName +} + +// IsPaused returns true if the multiclusterobservability instance is labeled as paused, and false otherwise +func IsPaused(annotations map[string]string) bool { + if annotations == nil { + return false + } + + if annotations[AnnotationMCOPause] != "" && + strings.EqualFold(annotations[AnnotationMCOPause], "true") { + return true + } + + return false +} + +// WithoutResourcesRequests returns true if the multiclusterobservability instance has annotation: +// mco-thanos-without-resources-requests: "true" +// This is just for test purpose: the KinD cluster does not have enough resources for the requests. +// We won't expose this annotation to the customer. +func WithoutResourcesRequests(annotations map[string]string) bool { + if annotations == nil { + return false + } + + if annotations[AnnotationMCOWithoutResourcesRequests] != "" && + strings.EqualFold(annotations[AnnotationMCOWithoutResourcesRequests], "true") { + return true + } + + return false +} + +// GetTenantUID returns tenant uid +func GetTenantUID() string { + if tenantUID == "" { + tenantUID = string(uuid.NewUUID()) + } + return tenantUID +} + +// GetObsAPISvc returns observatorium api service +func GetObsAPISvc(instanceName string) string { + return instanceName + "-observatorium-api." 
+ defaultNamespace + ".svc.cluster.local" +} + +// SetCustomRuleConfigMap set true if there is custom rule configmap +func SetCustomRuleConfigMap(hasConfigMap bool) { + hasCustomRuleConfigMap = hasConfigMap +} + +// HasCustomRuleConfigMap returns true if there is custom rule configmap +func HasCustomRuleConfigMap() bool { + return hasCustomRuleConfigMap +} + +func GetCertDuration() time.Duration { + return certDuration +} + +func SetCertDuration(annotations map[string]string) { + if annotations != nil && annotations[AnnotationCertDuration] != "" { + d, err := time.ParseDuration(annotations[AnnotationCertDuration]) + if err != nil { + log.Error(err, "Failed to parse cert duration, use default one", "annotation", annotations[AnnotationCertDuration]) + } else { + certDuration = d + return + } + } + certDuration = time.Hour * 24 * 365 +} + +func GetOperandNamePrefix() string { + return operandNamePrefix +} + +func GetImagePullPolicy(mco observabilityv1beta2.MultiClusterObservabilitySpec) corev1.PullPolicy { + if mco.ImagePullPolicy != "" { + return mco.ImagePullPolicy + } else { + return DefaultImagePullPolicy + } +} + +func GetImagePullSecret(mco observabilityv1beta2.MultiClusterObservabilitySpec) string { + if mco.ImagePullSecret != "" { + return mco.ImagePullSecret + } else { + return DefaultImagePullSecret + } +} + +func getDefaultResource(resourceType string, resource corev1.ResourceName, + component string) string { + //No provide the default limits + if resourceType == ResourceLimits && component != Grafana { + return "" + } + switch component { + case ObservatoriumAPI: + if resource == corev1.ResourceCPU { + return ObservatoriumAPICPURequets + } + if resource == corev1.ResourceMemory { + return ObservatoriumAPIMemoryRequets + } + case ThanosCompact: + if resource == corev1.ResourceCPU { + return ThanosCompactCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosCompactMemoryRequets + } + case ThanosQuery: + if resource == corev1.ResourceCPU { + return ThanosQueryCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosQueryMemoryRequets + } + case ThanosQueryFrontend: + if resource == corev1.ResourceCPU { + return ThanosQueryFrontendCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosQueryFrontendMemoryRequets + } + case ThanosRule: + if resource == corev1.ResourceCPU { + return ThanosRuleCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosRuleMemoryRequets + } + case ThanosReceive: + if resource == corev1.ResourceCPU { + return ThanosReceiveCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosReceiveMemoryRequets + } + case ThanosStoreShard: + if resource == corev1.ResourceCPU { + return ThanosStoreCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosStoreMemoryRequets + } + case ThanosQueryFrontendMemcached, ThanosStoreMemcached: + if resource == corev1.ResourceCPU { + return ThanosCachedCPURequets + } + if resource == corev1.ResourceMemory { + return ThanosCachedMemoryRequets + } + case MemcachedExporter: + if resource == corev1.ResourceCPU { + return MemcachedExporterCPURequets + } + if resource == corev1.ResourceMemory { + return MemcachedExporterMemoryRequets + } + case RBACQueryProxy: + if resource == corev1.ResourceCPU { + return RBACQueryProxyCPURequets + } + if resource == corev1.ResourceMemory { + return RBACQueryProxyMemoryRequets + } + case MetricsCollector: + if resource == corev1.ResourceCPU { + return MetricsCollectorCPURequets + } + if resource == 
corev1.ResourceMemory { + return MetricsCollectorMemoryRequets + } + case Alertmanager: + if resource == corev1.ResourceCPU { + return AlertmanagerCPURequets + } + if resource == corev1.ResourceMemory { + return AlertmanagerMemoryRequets + } + case Grafana: + if resourceType == ResourceRequests { + if resource == corev1.ResourceCPU { + return GrafanaCPURequets + } + if resource == corev1.ResourceMemory { + return GrafanaMemoryRequets + } + } else if resourceType == ResourceLimits { + if resource == corev1.ResourceCPU { + return GrafanaCPULimits + } + if resource == corev1.ResourceMemory { + return GrafanaMemoryLimits + } + } + } + return "" +} + +func getResource(resourceType string, resource corev1.ResourceName, + component string, advanced *observabilityv1beta2.AdvancedConfig) string { + if advanced == nil { + return getDefaultResource(resourceType, resource, component) + } + var resourcesReq *corev1.ResourceRequirements + switch component { + case ObservatoriumAPI: + if advanced.ObservatoriumAPI != nil { + resourcesReq = advanced.ObservatoriumAPI.Resources + } + case ThanosCompact: + if advanced.Compact != nil { + resourcesReq = advanced.Compact.Resources + } + case ThanosQuery: + if advanced.Query != nil { + resourcesReq = advanced.Query.Resources + } + case ThanosQueryFrontend: + if advanced.QueryFrontend != nil { + resourcesReq = advanced.QueryFrontend.Resources + } + case ThanosQueryFrontendMemcached: + if advanced.QueryFrontendMemcached != nil { + resourcesReq = advanced.QueryFrontendMemcached.CommonSpec.Resources + } + case ThanosRule: + if advanced.Rule != nil { + resourcesReq = advanced.Rule.Resources + } + case ThanosReceive: + if advanced.Receive != nil { + resourcesReq = advanced.Receive.Resources + } + case ThanosStoreMemcached: + if advanced.StoreMemcached != nil { + resourcesReq = advanced.StoreMemcached.CommonSpec.Resources + } + case ThanosStoreShard: + if advanced.Store != nil { + resourcesReq = advanced.Store.Resources + } + case RBACQueryProxy: + if advanced.RBACQueryProxy != nil { + resourcesReq = advanced.RBACQueryProxy.Resources + } + case Grafana: + if advanced.Grafana != nil { + resourcesReq = advanced.Grafana.Resources + } + case Alertmanager: + if advanced.Alertmanager != nil { + resourcesReq = advanced.Alertmanager.Resources + } + } + + if resourcesReq != nil { + if resourceType == ResourceRequests { + if len(resourcesReq.Requests) != 0 { + if resource == corev1.ResourceCPU { + return resourcesReq.Requests.Cpu().String() + } else if resource == corev1.ResourceMemory { + return resourcesReq.Requests.Memory().String() + } else { + return getDefaultResource(resourceType, resource, component) + } + } else { + return getDefaultResource(resourceType, resource, component) + } + } + if resourceType == ResourceLimits { + if len(resourcesReq.Limits) != 0 { + if resource == corev1.ResourceCPU { + return resourcesReq.Limits.Cpu().String() + } else if resource == corev1.ResourceMemory { + return resourcesReq.Limits.Memory().String() + } else { + return getDefaultResource(resourceType, resource, component) + } + } else { + return getDefaultResource(resourceType, resource, component) + } + } + } else { + return getDefaultResource(resourceType, resource, component) + } + return "" +} + +func GetResources(component string, advanced *observabilityv1beta2.AdvancedConfig) corev1.ResourceRequirements { + + cpuRequests := getResource(ResourceRequests, corev1.ResourceCPU, component, advanced) + cpuLimits := getResource(ResourceLimits, corev1.ResourceCPU, component, advanced) + 
memoryRequests := getResource(ResourceRequests, corev1.ResourceMemory, component, advanced) + memoryLimits := getResource(ResourceLimits, corev1.ResourceMemory, component, advanced) + + resourceReq := corev1.ResourceRequirements{} + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} + if cpuRequests == "0" { + cpuRequests = getDefaultResource(ResourceRequests, corev1.ResourceCPU, component) + } + if cpuRequests != "" { + requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(cpuRequests) + } + + if memoryRequests == "0" { + memoryRequests = getDefaultResource(ResourceRequests, corev1.ResourceMemory, component) + } + if memoryRequests != "" { + requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(memoryRequests) + } + + if cpuLimits == "0" { + cpuLimits = getDefaultResource(ResourceLimits, corev1.ResourceCPU, component) + } + if cpuLimits != "" { + limits[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(cpuLimits) + } + + if memoryLimits == "0" { + memoryLimits = getDefaultResource(ResourceLimits, corev1.ResourceMemory, component) + } + if memoryLimits != "" { + limits[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(memoryLimits) + } + resourceReq.Limits = limits + resourceReq.Requests = requests + + return resourceReq +} + +func GetOBAResources(oba *mcoshared.ObservabilityAddonSpec) *corev1.ResourceRequirements { + cpuRequests := MetricsCollectorCPURequets + cpuLimits := MetricsCollectorCPULimits + memoryRequests := MetricsCollectorMemoryRequets + memoryLimits := MetricsCollectorMemoryLimits + + if oba.Resources != nil { + if len(oba.Resources.Requests) != 0 { + if oba.Resources.Requests.Cpu().String() != "0" { + cpuRequests = oba.Resources.Requests.Cpu().String() + } + if oba.Resources.Requests.Memory().String() != "0" { + memoryRequests = oba.Resources.Requests.Memory().String() + } + } + if len(oba.Resources.Limits) != 0 { + if oba.Resources.Limits.Cpu().String() != "0" { + cpuLimits = oba.Resources.Limits.Cpu().String() + } + if oba.Resources.Limits.Memory().String() != "0" { + memoryLimits = oba.Resources.Limits.Memory().String() + } + } + } + + resourceReq := &corev1.ResourceRequirements{} + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} + if cpuRequests != "" { + requests[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(cpuRequests) + } + if memoryRequests != "" { + requests[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(memoryRequests) + } + if cpuLimits != "" { + limits[corev1.ResourceName(corev1.ResourceCPU)] = resource.MustParse(cpuLimits) + } + if memoryLimits != "" { + limits[corev1.ResourceName(corev1.ResourceMemory)] = resource.MustParse(memoryLimits) + } + resourceReq.Limits = limits + resourceReq.Requests = requests + + return resourceReq +} + +func GetOperandName(name string) string { + log.V(1).Info("operand is", "key", name, "name", operandNames[name]) + return operandNames[name] +} + +func SetOperandNames(c client.Client) error { + if len(operandNames) != 0 { + return nil + } + //set the default values. 
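+ // The defaults seeded below are simply GetOperandNamePrefix() + <component name>,
+ // e.g. "observability-grafana" (assuming the prefix constant, which is defined
+ // outside this hunk, is "observability-"); Observatorium itself defaults to the
+ // CR name returned by GetDefaultCRName(). The Observatorium lookup further down
+ // only rewrites these entries for the upgrade case, i.e. when an owned
+ // Observatorium CR created by an earlier release still uses unprefixed names.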
+ operandNames[Grafana] = GetOperandNamePrefix() + Grafana + operandNames[RBACQueryProxy] = GetOperandNamePrefix() + RBACQueryProxy + operandNames[Alertmanager] = GetOperandNamePrefix() + Alertmanager + operandNames[ObservatoriumOperator] = GetOperandNamePrefix() + ObservatoriumOperator + operandNames[Observatorium] = GetDefaultCRName() + operandNames[ObservatoriumAPI] = GetOperandNamePrefix() + ObservatoriumAPI + + // Check if the Observatorium CR already exists + opts := &client.ListOptions{ + Namespace: GetDefaultNamespace(), + } + + observatoriumList := &obsv1alpha1.ObservatoriumList{} + err := c.List(context.TODO(), observatoriumList, opts) + if err != nil { + return err + } + if len(observatoriumList.Items) != 0 { + for _, observatorium := range observatoriumList.Items { + for _, ownerRef := range observatorium.OwnerReferences { + if ownerRef.Kind == "MultiClusterObservability" && ownerRef.Name == GetMonitoringCRName() { + if observatorium.Name != GetDefaultCRName() { + // this is for upgrade case. + operandNames[Grafana] = Grafana + operandNames[RBACQueryProxy] = RBACQueryProxy + operandNames[Alertmanager] = Alertmanager + operandNames[ObservatoriumOperator] = ObservatoriumOperator + operandNames[Observatorium] = observatorium.Name + operandNames[ObservatoriumAPI] = observatorium.Name + "-" + ObservatoriumAPI + } + break + } + } + } + } + + return nil +} + +// CleanUpOperandNames delete all the operand name items +// Should be called when the MCO CR is deleted +func CleanUpOperandNames() { + for k := range operandNames { + delete(operandNames, k) + } +} + +// GetValidatingWebhookConfigurationForMCO return the ValidatingWebhookConfiguration for the MCO validaing webhook +func GetValidatingWebhookConfigurationForMCO() *admissionregistrationv1.ValidatingWebhookConfiguration { + validatingWebhookPath := "/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability" + noSideEffects := admissionregistrationv1.SideEffectClassNone + allScopeType := admissionregistrationv1.AllScopes + webhookServiceNamespace := GetMCONamespace() + webhookServicePort := int32(443) + return &admissionregistrationv1.ValidatingWebhookConfiguration{ + ObjectMeta: v1.ObjectMeta{ + Name: ValidatingWebhookConfigurationName, + Labels: map[string]string{ + "name": ValidatingWebhookConfigurationName, + }, + Annotations: map[string]string{ + "service.beta.openshift.io/inject-cabundle": "true", + }, + }, + Webhooks: []admissionregistrationv1.ValidatingWebhook{ + { + AdmissionReviewVersions: []string{"v1", "v1beta1"}, + Name: "vmulticlusterobservability.observability.open-cluster-management.io", + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: WebhookServiceName, + Namespace: webhookServiceNamespace, + Path: &validatingWebhookPath, + Port: &webhookServicePort, + }, + CABundle: []byte(""), + }, + SideEffects: &noSideEffects, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{"observability.open-cluster-management.io"}, + APIVersions: []string{"v1beta2"}, + Resources: []string{"multiclusterobservabilities"}, + Scope: &allScopeType, + }, + }, + }, + }, + }, + } +} diff --git a/operators/multiclusterobservability/pkg/config/config_test.go b/operators/multiclusterobservability/pkg/config/config_test.go new file mode 100644 index 
000000000..4957f957b --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/config_test.go @@ -0,0 +1,1040 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "fmt" + "os" + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + routev1 "github.com/openshift/api/route/v1" + fakeconfigclient "github.com/openshift/client-go/config/clientset/versioned/fake" + observatoriumv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" +) + +var ( + apiServerURL = "http://example.com" + clusterID = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + version = "2.1.1" + DefaultDSImgRepository = "quay.io:443/acm-d" +) + +func TestGetClusterNameLabelKey(t *testing.T) { + clusterName := GetClusterNameLabelKey() + if clusterName != clusterNameLabelKey { + t.Errorf("Cluster Label (%v) is not the expected (%v)", clusterName, clusterNameLabelKey) + } +} + +func TestReplaceImage(t *testing.T) { + + caseList := []struct { + annotations map[string]string + name string + imageRepo string + expected bool + cm map[string]string + }{ + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + "mco-test-image": "test.org/test:latest", + }, + name: "Replace image for test purpose", + imageRepo: "test.org", + expected: true, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + AnnotationKeyImageTagSuffix: "test", + }, + name: "Image is in different org", + imageRepo: "test.org", + expected: false, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + }, + name: "Image is in different org", + imageRepo: "test.org", + expected: false, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + AnnotationKeyImageTagSuffix: "2.3.0-SNAPSHOT-2021-07-26-18-43-26", + }, + name: "Image is in the same org", + imageRepo: DefaultImgRepository, + expected: true, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + }, + name: "Image is in the same org", + imageRepo: DefaultImgRepository, + expected: false, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + }, + name: "Image is in the same org", + imageRepo: DefaultImgRepository, + expected: true, + cm: map[string]string{ + "test": "test.org", + }, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultDSImgRepository, + AnnotationKeyImageTagSuffix: "2.3.0-SNAPSHOT-2021-07-26-18-43-26", + }, + name: "Image is from the ds build", + imageRepo: "test.org", + expected: false, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultDSImgRepository, + }, + name: "Image is from the ds build", + imageRepo: "test.org", + expected: true, + cm: map[string]string{ + "test": 
"test.org", + }, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultDSImgRepository, + }, + name: "Image is from the ds build", + imageRepo: "test.org", + expected: false, + cm: nil, + }, + + { + annotations: map[string]string{ + AnnotationKeyImageRepository: "", + AnnotationKeyImageTagSuffix: "", + }, + name: "the img repo is empty", + imageRepo: "", + expected: false, + cm: nil, + }, + + { + annotations: map[string]string{}, + name: "no img info", + imageRepo: "test.org", + expected: false, + cm: nil, + }, + + { + annotations: nil, + name: "annotations is nil", + imageRepo: "test.org", + expected: false, + cm: nil, + }, + } + + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + SetImageManifests(c.cm) + output, _ := ReplaceImage(c.annotations, c.imageRepo, "test") + if output != c.expected { + t.Errorf("case (%v) output (%v) is not the expected (%v)", c.name, output, c.expected) + } + }) + } +} + +func TestGetDefaultTenantName(t *testing.T) { + tenantName := GetDefaultTenantName() + if tenantName != defaultTenantName { + t.Errorf("Tenant name (%v) is not the expected (%v)", tenantName, defaultTenantName) + } +} + +func TestGetDefaultNamespace(t *testing.T) { + expected := "open-cluster-management-observability" + if GetDefaultNamespace() != expected { + t.Errorf("Default Namespace (%v) is not the expected (%v)", GetDefaultNamespace(), expected) + } +} + +func TestMonitoringCRName(t *testing.T) { + var monitoringCR = "monitoring" + SetMonitoringCRName(monitoringCR) + + if monitoringCR != GetMonitoringCRName() { + t.Errorf("Monitoring CR Name (%v) is not the expected (%v)", GetMonitoringCRName(), monitoringCR) + } +} + +func TestGetKubeAPIServerAddress(t *testing.T) { + inf := &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{Name: infrastructureConfigName}, + Status: configv1.InfrastructureStatus{ + APIServerURL: apiServerURL, + }, + } + scheme := runtime.NewScheme() + scheme.AddKnownTypes(configv1.GroupVersion, inf) + client := fake.NewFakeClientWithScheme(scheme, inf) + apiURL, _ := GetKubeAPIServerAddress(client) + if apiURL != apiServerURL { + t.Errorf("Kubenetes API Server Address (%v) is not the expected (%v)", apiURL, apiServerURL) + } +} + +func TestGetClusterIDSuccess(t *testing.T) { + version := &configv1.ClusterVersion{ + ObjectMeta: metav1.ObjectMeta{Name: "version"}, + Spec: configv1.ClusterVersionSpec{ + ClusterID: configv1.ClusterID(clusterID), + }, + } + client := fakeconfigclient.NewSimpleClientset(version) + tmpClusterID, _ := GetClusterID(client) + if tmpClusterID != clusterID { + t.Errorf("OCP ClusterID (%v) is not the expected (%v)", tmpClusterID, clusterID) + } +} + +func TestGetClusterIDFailed(t *testing.T) { + inf := &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{Name: infrastructureConfigName}, + Status: configv1.InfrastructureStatus{ + APIServerURL: apiServerURL, + }, + } + client := fakeconfigclient.NewSimpleClientset(inf) + _, err := GetClusterID(client) + if err == nil { + t.Errorf("Should throw the error since there is no clusterversion defined") + } +} + +func TestGetObsAPIHost(t *testing.T) { + route := &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Name: obsAPIGateway, + Namespace: "test", + }, + Spec: routev1.RouteSpec{ + Host: apiServerURL, + }, + } + scheme := runtime.NewScheme() + scheme.AddKnownTypes(routev1.GroupVersion, route) + client := fake.NewFakeClientWithScheme(scheme, route) + + host, _ := GetObsAPIHost(client, "default") + if host == apiServerURL { + t.Errorf("Should 
not get route host in default namespace") + } + host, _ = GetObsAPIHost(client, "test") + if host != apiServerURL { + t.Errorf("Observatorium api (%v) is not the expected (%v)", host, apiServerURL) + } +} + +func TestIsPaused(t *testing.T) { + caseList := []struct { + annotations map[string]string + expected bool + name string + }{ + { + name: "without mco-pause", + annotations: map[string]string{ + AnnotationKeyImageRepository: DefaultImgRepository, + AnnotationKeyImageTagSuffix: "test", + }, + expected: false, + }, + { + name: "mco-pause is empty", + annotations: map[string]string{ + AnnotationMCOPause: "", + }, + expected: false, + }, + { + name: "mco-pause is false", + annotations: map[string]string{ + AnnotationMCOPause: "false", + }, + expected: false, + }, + { + name: "mco-pause is true", + annotations: map[string]string{ + AnnotationMCOPause: "true", + }, + expected: true, + }, + } + + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + output := IsPaused(c.annotations) + if output != c.expected { + t.Errorf("case (%v) output (%v) is not the expected (%v)", c.name, output, c.expected) + } + }) + } +} + +func NewFakeClient(mco *mcov1beta2.MultiClusterObservability, + obs *observatoriumv1alpha1.Observatorium) client.Client { + s := scheme.Scheme + s.AddKnownTypes(mcov1beta2.GroupVersion, mco) + s.AddKnownTypes(observatoriumv1alpha1.GroupVersion, obs) + objs := []runtime.Object{mco, obs} + return fake.NewFakeClientWithScheme(s, objs...) +} + +func TestReadImageManifestConfigMap(t *testing.T) { + var buildTestImageManifestCM func(ns, version string) *corev1.ConfigMap + buildTestImageManifestCM = func(ns, version string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: ImageManifestConfigMapNamePrefix + version, + Namespace: ns, + Labels: map[string]string{ + OCMManifestConfigMapTypeLabelKey: OCMManifestConfigMapTypeLabelValue, + OCMManifestConfigMapVersionLabelKey: version, + }, + }, + Data: map[string]string{ + "test-key": fmt.Sprintf("test-value:%s", version), + }, + } + } + + ns := "testing" + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + + caseList := []struct { + name string + inputCMList []string + version string + expectedData map[string]string + expectedRet bool + preFunc func() + }{ + { + name: "no image manifest configmap", + inputCMList: []string{}, + version: "2.3.0", + expectedRet: false, + expectedData: map[string]string{}, + preFunc: func() { + os.Setenv("POD_NAMESPACE", ns) + SetImageManifests(map[string]string{}) + }, + }, + { + name: "single valid image manifest configmap", + inputCMList: []string{"2.2.3"}, + version: "2.3.0", + expectedRet: false, + expectedData: map[string]string{}, + preFunc: func() { + os.Setenv("POD_NAMESPACE", ns) + SetImageManifests(map[string]string{}) + }, + }, + { + name: "multiple valid image manifest configmaps", + inputCMList: []string{"2.2.3", "2.3.0"}, + version: "2.3.0", + expectedRet: true, + expectedData: map[string]string{ + "test-key": "test-value:2.3.0", + }, + preFunc: func() { + os.Setenv("POD_NAMESPACE", ns) + SetImageManifests(map[string]string{}) + }, + }, + { + name: "multiple image manifest configmaps with invalid", + inputCMList: []string{"2.2.3", "2.3.0", "invalid"}, + version: "2.3.0", + expectedRet: true, + expectedData: map[string]string{ + "test-key": "test-value:2.3.0", + }, + preFunc: func() { + os.Setenv("POD_NAMESPACE", ns) + SetImageManifests(map[string]string{}) + }, + }, + { + name: "valid image manifest configmaps with no namespace set", + 
inputCMList: []string{"2.2.3", "2.3.0"}, + version: "2.3.0", + expectedRet: false, + expectedData: map[string]string{}, + preFunc: func() { + os.Unsetenv("POD_NAMESPACE") + SetImageManifests(map[string]string{}) + }, + }, + } + + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + c.preFunc() + initObjs := []runtime.Object{} + for _, cmName := range c.inputCMList { + initObjs = append(initObjs, buildTestImageManifestCM(ns, cmName)) + } + client := fake.NewFakeClientWithScheme(scheme, initObjs...) + + gotRet, err := ReadImageManifestConfigMap(client, c.version) + if err != nil { + t.Errorf("Failed read image manifest configmap due to %v", err) + } + if gotRet != c.expectedRet { + t.Errorf("case (%v) output (%v) is not the expected (%v)", c.name, gotRet, c.expectedRet) + } + if !reflect.DeepEqual(GetImageManifests(), c.expectedData) { + t.Errorf("case (%v) output (%v) is not the expected (%v)", c.name, GetImageManifests(), c.expectedData) + } + }) + } +} + +func Test_checkIsIBMCloud(t *testing.T) { + s := scheme.Scheme + nodeIBM := &corev1.Node{ + Spec: corev1.NodeSpec{ + ProviderID: "ibm", + }, + } + nodeOther := &corev1.Node{} + + type args struct { + client client.Client + name string + } + caselist := []struct { + name string + args args + want bool + wantErr bool + }{ + { + name: "is normal ocp", + args: args{ + client: fake.NewFakeClientWithScheme(s, []runtime.Object{nodeOther}...), + name: "test-secret", + }, + want: false, + wantErr: false, + }, + { + name: "is ibm", + args: args{ + client: fake.NewFakeClientWithScheme(s, []runtime.Object{nodeIBM}...), + name: "test-secret", + }, + want: true, + wantErr: false, + }, + } + for _, c := range caselist { + t.Run(c.name, func(t *testing.T) { + got, err := CheckIsIBMCloud(c.args.client) + if (err != nil) != c.wantErr { + t.Errorf("checkIsIBMCloud() error = %v, wantErr %v", err, c.wantErr) + return + } + if !reflect.DeepEqual(got, c.want) { + t.Errorf("checkIsIBMCloud() = %v, want %v", got, c.want) + } + }) + } +} + +func TestGetResources(t *testing.T) { + caseList := []struct { + name string + componentName string + raw *mcov1beta2.AdvancedConfig + result func(resources corev1.ResourceRequirements) bool + }{ + { + name: "Have requests defined in resources", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == "1" && + resources.Requests.Memory().String() == "1Gi" && + resources.Limits.Cpu().String() == "0" && + resources.Limits.Memory().String() == "0" + }, + }, + { + name: "Have limits defined in resources", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == ObservatoriumAPICPURequets && + resources.Requests.Memory().String() == ObservatoriumAPIMemoryRequets && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == "1Gi" + }, + }, + { + name: "Have limits defined in resources", + 
componentName: RBACQueryProxy, + raw: &mcov1beta2.AdvancedConfig{ + RBACQueryProxy: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == RBACQueryProxyCPURequets && + resources.Requests.Memory().String() == RBACQueryProxyMemoryRequets && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == "1Gi" + }, + }, + { + name: "Have requests and limits defined in requests", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == "1" && + resources.Requests.Memory().String() == "1Gi" && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == "1Gi" + }, + }, + { + name: "No CPU defined in requests", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{}, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == ObservatoriumAPICPURequets && + resources.Requests.Memory().String() == ObservatoriumAPIMemoryRequets && + resources.Limits.Cpu().String() == "0" && resources.Limits.Memory().String() == "0" + }, + }, + { + name: "No requests defined in resources", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{}, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == ObservatoriumAPICPURequets && + resources.Requests.Memory().String() == ObservatoriumAPIMemoryRequets && + resources.Limits.Cpu().String() == "0" && resources.Limits.Memory().String() == "0" + }, + }, + { + name: "No resources defined", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{}, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == ObservatoriumAPICPURequets && + resources.Requests.Memory().String() == ObservatoriumAPIMemoryRequets && + resources.Limits.Cpu().String() == "0" && resources.Limits.Memory().String() == "0" + }, + }, + { + name: "No advanced defined", + componentName: ObservatoriumAPI, + raw: nil, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == ObservatoriumAPICPURequets && + resources.Requests.Memory().String() == ObservatoriumAPIMemoryRequets && + resources.Limits.Cpu().String() == "0" && resources.Limits.Memory().String() == "0" + }, + }, + { + name: "No advanced defined", + componentName: Grafana, + raw: nil, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == GrafanaCPURequets && + resources.Requests.Memory().String() 
== GrafanaMemoryRequets && + resources.Limits.Cpu().String() == GrafanaCPULimits && + resources.Limits.Memory().String() == GrafanaMemoryLimits + }, + }, + { + name: "Have requests defined", + componentName: Grafana, + raw: &mcov1beta2.AdvancedConfig{ + Grafana: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == "1" && + resources.Requests.Memory().String() == GrafanaMemoryRequets && + resources.Limits.Cpu().String() == GrafanaCPULimits && + resources.Limits.Memory().String() == GrafanaMemoryLimits + }, + }, + { + name: "Have limits defined", + componentName: Grafana, + raw: &mcov1beta2.AdvancedConfig{ + Grafana: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == GrafanaCPURequets && + resources.Requests.Memory().String() == GrafanaMemoryRequets && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == GrafanaMemoryLimits + }, + }, + { + name: "Have limits defined", + componentName: Grafana, + raw: &mcov1beta2.AdvancedConfig{ + Grafana: &mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == GrafanaCPURequets && + resources.Requests.Memory().String() == GrafanaMemoryRequets && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == "1Gi" + }, + }, + { + name: "Have limits defined", + componentName: ThanosQueryFrontendMemcached, + raw: &mcov1beta2.AdvancedConfig{ + QueryFrontendMemcached: &mcov1beta2.CacheConfig{ + CommonSpec: mcov1beta2.CommonSpec{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == ThanosCachedCPURequets && + resources.Requests.Memory().String() == ThanosCachedMemoryRequets && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == "1Gi" + }, + }, + } + + for _, c := range caseList { + t.Run(c.componentName+":"+c.name, func(t *testing.T) { + resources := GetResources(c.componentName, c.raw) + if !c.result(resources) { + t.Errorf("case (%v) output (%v) is not the expected", c.componentName+":"+c.name, resources) + } + }) + } +} + +func TestGetReplicas(t *testing.T) { + var replicas0 int32 = 0 + caseList := []struct { + name string + componentName string + raw *mcov1beta2.AdvancedConfig + result func(replicas *int32) bool + }{ + { + name: "Have replicas defined", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + Replicas: &Replicas1, + }, + }, + result: func(replicas *int32) bool { + return replicas == &Replicas1 + }, + }, + { + name: "Do not allow to set 0", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{ + 
Replicas: &replicas0, + }, + }, + result: func(replicas *int32) bool { + return replicas == &Replicas2 + }, + }, + { + name: "No advanced defined", + componentName: ObservatoriumAPI, + raw: nil, + result: func(replicas *int32) bool { + return replicas == &Replicas2 + }, + }, + { + name: "No replicas defined", + componentName: ObservatoriumAPI, + raw: &mcov1beta2.AdvancedConfig{ + ObservatoriumAPI: &mcov1beta2.CommonSpec{}, + }, + result: func(replicas *int32) bool { + return replicas == &Replicas2 + }, + }, + } + for _, c := range caseList { + t.Run(c.componentName+":"+c.name, func(t *testing.T) { + replicas := GetReplicas(c.componentName, c.raw) + if !c.result(replicas) { + t.Errorf("case (%v) output (%v) is not the expected", c.componentName+":"+c.name, replicas) + } + }) + } +} + +func TestGetOBAResources(t *testing.T) { + caseList := []struct { + name string + componentName string + raw *mcoshared.ObservabilityAddonSpec + result func(resources corev1.ResourceRequirements) bool + }{ + { + name: "Have requests defined", + componentName: ObservatoriumAPI, + raw: &mcoshared.ObservabilityAddonSpec{ + Resources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == "1" && + resources.Requests.Memory().String() == "1Gi" && + resources.Limits.Cpu().String() == "0" && + resources.Limits.Memory().String() == "0" + }, + }, + { + name: "Have limits defined", + componentName: ObservatoriumAPI, + raw: &mcoshared.ObservabilityAddonSpec{ + Resources: &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == MetricsCollectorCPURequets && + resources.Requests.Memory().String() == MetricsCollectorMemoryRequets && + resources.Limits.Cpu().String() == "1" && + resources.Limits.Memory().String() == "0" + }, + }, + { + name: "no resources defined", + componentName: ObservatoriumAPI, + raw: &mcoshared.ObservabilityAddonSpec{ + Resources: &corev1.ResourceRequirements{}, + }, + result: func(resources corev1.ResourceRequirements) bool { + return resources.Requests.Cpu().String() == MetricsCollectorCPURequets && + resources.Requests.Memory().String() == MetricsCollectorMemoryRequets && + resources.Limits.Cpu().String() == "0" && + resources.Limits.Memory().String() == "0" + }, + }, + } + for _, c := range caseList { + t.Run(c.componentName+":"+c.name, func(t *testing.T) { + resources := GetOBAResources(c.raw) + if !c.result(*resources) { + t.Errorf("case (%v) output (%v) is not the expected", c.componentName+":"+c.name, resources) + } + }) + } +} + +func TestGetOperandName(t *testing.T) { + caseList := []struct { + name string + componentName string + prepare func() + result func() bool + }{ + { + name: "No Observatorium CR", + componentName: Alertmanager, + prepare: func() { + SetOperandNames(fake.NewFakeClientWithScheme(runtime.NewScheme())) + }, + result: func() bool { + return GetOperandName(Alertmanager) == GetOperandNamePrefix()+"alertmanager" + }, + }, + { + name: "Have Observatorium CR without ownerreference", + componentName: Alertmanager, + prepare: func() { + //clean the operandNames map + CleanUpOperandNames() + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: 
"MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: GetDefaultCRName(), + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + }, + }, + } + + observatorium := &observatoriumv1alpha1.Observatorium{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetOperandNamePrefix() + "-observatorium", + Namespace: GetDefaultNamespace(), + }, + } + + // Register operator types with the runtime scheme. + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + observatoriumv1alpha1.AddToScheme(s) + client := fake.NewFakeClientWithScheme(s, []runtime.Object{mco, observatorium}...) + SetMonitoringCRName(GetDefaultCRName()) + SetOperandNames(client) + }, + result: func() bool { + return GetOperandName(Alertmanager) == GetOperandNamePrefix()+Alertmanager && + GetOperandName(Grafana) == GetOperandNamePrefix()+Grafana && + GetOperandName(Observatorium) == GetDefaultCRName() + }, + }, + { + name: "Have Observatorium CR (observability-observatorium) with ownerreference", + componentName: Alertmanager, + prepare: func() { + //clean the operandNames map + CleanUpOperandNames() + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: GetDefaultCRName(), + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + }, + }, + } + + observatorium := &observatoriumv1alpha1.Observatorium{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetOperandNamePrefix() + "observatorium", + Namespace: GetDefaultNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "MultiClusterObservability", + Name: GetDefaultCRName(), + }, + }, + }, + } + + // Register operator types with the runtime scheme. + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + observatoriumv1alpha1.AddToScheme(s) + client := fake.NewFakeClientWithScheme(s, []runtime.Object{mco, observatorium}...) + + SetMonitoringCRName(GetDefaultCRName()) + SetOperandNames(client) + }, + result: func() bool { + return GetOperandName(Alertmanager) == Alertmanager && + GetOperandName(Grafana) == Grafana && + GetOperandName(Observatorium) == GetOperandNamePrefix()+"observatorium" + }, + }, + { + name: "Have Observatorium CR (observability) with ownerreference", + componentName: Alertmanager, + prepare: func() { + //clean the operandNames map + CleanUpOperandNames() + mco := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Name: GetDefaultCRName(), + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + }, + }, + } + + observatorium := &observatoriumv1alpha1.Observatorium{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetDefaultCRName(), + Namespace: GetDefaultNamespace(), + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "MultiClusterObservability", + Name: GetDefaultCRName(), + }, + }, + }, + } + + // Register operator types with the runtime scheme. + s := scheme.Scheme + mcov1beta2.SchemeBuilder.AddToScheme(s) + observatoriumv1alpha1.AddToScheme(s) + client := fake.NewFakeClientWithScheme(s, []runtime.Object{mco, observatorium}...) 
+ + SetMonitoringCRName(GetDefaultCRName()) + SetOperandNames(client) + }, + result: func() bool { + return GetOperandName(Alertmanager) == GetOperandNamePrefix()+Alertmanager && + GetOperandName(Grafana) == GetOperandNamePrefix()+Grafana && + GetOperandName(Observatorium) == GetDefaultCRName() + }, + }, + } + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + c.prepare() + if !c.result() { + t.Errorf("case (%v) output is not the expected", c.name) + } + }) + } +} diff --git a/operators/multiclusterobservability/pkg/config/gcs_conf.go b/operators/multiclusterobservability/pkg/config/gcs_conf.go new file mode 100644 index 000000000..1bf7fbf02 --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/gcs_conf.go @@ -0,0 +1,44 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "errors" + "strings" + + "gopkg.in/yaml.v2" +) + +func validateGCS(conf Config) error { + + if conf.Bucket == "" { + return errors.New("no bucket as gcs bucket name in config file") + } + + if conf.ServiceAccount == "" { + return errors.New("no service_account as google application credentials in config file") + } + + return nil +} + +// IsValidGCSConf is used to validate GCS configuration +func IsValidGCSConf(data []byte) (bool, error) { + var objectConfg ObjectStorgeConf + err := yaml.Unmarshal(data, &objectConfg) + if err != nil { + return false, err + } + + if strings.ToLower(objectConfg.Type) != "gcs" { + return false, errors.New("invalid type config, only GCS type is supported") + } + + err = validateGCS(objectConfg.Config) + if err != nil { + return false, err + } + + return true, nil +} diff --git a/operators/multiclusterobservability/pkg/config/obj_storage_conf.go b/operators/multiclusterobservability/pkg/config/obj_storage_conf.go new file mode 100644 index 000000000..3fec9c9a2 --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/obj_storage_conf.go @@ -0,0 +1,55 @@ +// Copyright (c) 2021 Red Hat, Inc. 
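For context, the GCS validator above accepts the same YAML document that ends up in the metric object storage secret. A minimal sketch of a payload that passes validation; the bucket name and credentials are placeholders and the package alias is an assumption:

    package main

    import (
        "fmt"

        mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config"
    )

    func main() {
        gcsConf := []byte("type: gcs\nconfig:\n  bucket: metrics-bucket\n  service_account: placeholder-credentials\n")
        ok, err := mcoconfig.IsValidGCSConf(gcsConf)
        fmt.Println(ok, err) // true <nil>: both bucket and service_account are set
    }
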
+// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "errors" + "strings" + + "gopkg.in/yaml.v2" +) + +// Config is for s3/azure/gcs compatiable configuration +type Config struct { + // s3 configuration + Bucket string `yaml:"bucket"` + Endpoint string `yaml:"endpoint"` + Insecure bool `yaml:"insecure"` + AccessKey string `yaml:"access_key"` + SecretKey string `yaml:"secret_key"` + + // azure configuration + // Bucket string `yaml:"bucket"` + StorageAccount string `yaml:"storage_account"` + StorageAccountKey string `yaml:"storage_account_key"` + Container string `yaml:"container"` + MaxRetries int32 `yaml:"max_retries"` + + // gcs configuration + // Endpoint string `yaml:"endpoint"` + ServiceAccount string `yaml:"service_account"` +} + +// CheckObjStorageConf is used to check/valid the object storage configurations +func CheckObjStorageConf(data []byte) (bool, error) { + var objectConfg ObjectStorgeConf + err := yaml.Unmarshal(data, &objectConfg) + if err != nil { + return false, err + } + + switch strings.ToLower(objectConfg.Type) { + case "s3": + return IsValidS3Conf(data) + + case "gcs": + return IsValidGCSConf(data) + + case "azure": + return IsValidAzureConf(data) + + default: + return false, errors.New("invalid object storage type config") + } +} diff --git a/operators/multiclusterobservability/pkg/config/obj_storage_conf_test.go b/operators/multiclusterobservability/pkg/config/obj_storage_conf_test.go new file mode 100644 index 000000000..231535485 --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/obj_storage_conf_test.go @@ -0,0 +1,202 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "testing" +) + +func TestCheckObjStorageConf(t *testing.T) { + caseList := []struct { + conf []byte + name string + expected bool + }{ + { + conf: []byte(`type: s3 +config: + bucket: bucket + endpoint: endpoint + insecure: true + access_key: access_key + secret_key: secret_key`), + name: "valid s3 conf", + expected: true, + }, + + { + conf: []byte(`type: azure +config: + storage_account: storage_account + storage_account_key: storage_account_key + container: container + endpoint: endpoint + max_retries: 0`), + name: "valid azure conf", + expected: true, + }, + + { + conf: []byte(`type: gcs +config: + bucket: bucket + service_account: service_account`), + name: "valid gcs conf", + expected: true, + }, + + { + conf: []byte(`type: s3 +config: + bucket: "" + endpoint: endpoint + insecure: true + access_key: access_key + secret_key: secret_key`), + name: "no bucket", + expected: false, + }, + + { + conf: []byte(`type: s3 +config: + bucket: bucket + endpoint: "" + insecure: true + access_key: access_key + secret_key: secret_key`), + name: "no endpoint", + expected: false, + }, + + { + conf: []byte(`type: s3 +config: + bucket: bucket + endpoint: endpoint + insecure: true + access_key: "" + secret_key: secret_key`), + name: "no access_key", + expected: false, + }, + + { + conf: []byte(`type: s3 +config: + bucket: bucket + endpoint: endpoint + insecure: true + access_key: access_key + secret_key: ""`), + name: "no secret_key", + expected: false, + }, + + { + conf: []byte(`type: gcs +config: + bucket: "" + service_account: service_account`), + name: "no bucket", + expected: false, + }, + + { + conf: []byte(`type: gcs +config: + bucket: bucket + service_account: ""`), + name: "no service_account", + expected: false, + }, + + { + conf: []byte(`type: azure +config: + 
storage_account: "" + storage_account_key: storage_account_key + container: container + endpoint: endpoint + max_retries: 0`), + name: "no storage_account", + expected: false, + }, + + { + conf: []byte(`type: azure +config: + storage_account: storage_account + storage_account_key: "" + container: container + endpoint: endpoint + max_retries: 0`), + name: "no storage_account_key", + expected: false, + }, + + { + conf: []byte(`type: azure +config: + storage_account: storage_account + storage_account_key: storage_account_key + container: "" + endpoint: endpoint + max_retries: 0`), + name: "no container", + expected: false, + }, + + { + conf: []byte(`type: azure +config: + storage_account: storage_account + storage_account_key: storage_account_key + container: container + endpoint: "" + max_retries: 0`), + name: "no endpoint", + expected: false, + }, + + { + conf: []byte(`type: test +config: + bucket: bucket + endpoint: endpoint + insecure: true + access_key: access_key + secret_key: ""`), + name: "invalid type", + expected: false, + }, + + { + conf: []byte(` +config: + bucket: bucket + endpoint: endpoint + insecure: true + access_key: access_key + secret_key: secret_key`), + name: "invalid conf format", + expected: false, + }, + + { + conf: []byte(``), + name: "no conf", + expected: false, + }, + } + + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + output, _ := CheckObjStorageConf(c.conf) + if output != c.expected { + t.Errorf("case (%v) output (%v) is not the expected (%v)", c.name, output, c.expected) + } + }) + } +} diff --git a/operators/multiclusterobservability/pkg/config/s3_conf.go b/operators/multiclusterobservability/pkg/config/s3_conf.go new file mode 100644 index 000000000..712991003 --- /dev/null +++ b/operators/multiclusterobservability/pkg/config/s3_conf.go @@ -0,0 +1,52 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + "errors" + "strings" + + "gopkg.in/yaml.v2" +) + +func validateS3(conf Config) error { + + if conf.Bucket == "" { + return errors.New("no s3 bucket in config file") + } + + if conf.Endpoint == "" { + return errors.New("no s3 endpoint in config file") + } + + if conf.AccessKey == "" { + return errors.New("no s3 access_key in config file") + } + + if conf.SecretKey == "" { + return errors.New("no s3 secret_key in config file") + } + + return nil +} + +// IsValidS3Conf is used to validate s3 configuration +func IsValidS3Conf(data []byte) (bool, error) { + var objectConfg ObjectStorgeConf + err := yaml.Unmarshal(data, &objectConfg) + if err != nil { + return false, err + } + + if strings.ToLower(objectConfg.Type) != "s3" { + return false, errors.New("invalid type config, only s3 type is supported") + } + + err = validateS3(objectConfg.Config) + if err != nil { + return false, err + } + + return true, nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/renderer.go b/operators/multiclusterobservability/pkg/rendering/renderer.go new file mode 100644 index 000000000..ba7a91d3e --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/renderer.go @@ -0,0 +1,167 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "fmt" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/kustomize/v3/pkg/resource" + + obv1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering/templates" + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" + templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +var log = logf.Log.WithName("renderer") + +type MCORenderer struct { + renderer *rendererutil.Renderer + cr *obv1beta2.MultiClusterObservability + renderGrafanaFns map[string]rendererutil.RenderFn + renderAlertManagerFns map[string]rendererutil.RenderFn + renderThanosFns map[string]rendererutil.RenderFn + renderProxyFns map[string]rendererutil.RenderFn +} + +func NewMCORenderer(multipleClusterMonitoring *obv1beta2.MultiClusterObservability) *MCORenderer { + mcoRenderer := &MCORenderer{ + renderer: rendererutil.NewRenderer(), + cr: multipleClusterMonitoring, + } + mcoRenderer.newGranfanaRenderer() + mcoRenderer.newAlertManagerRenderer() + mcoRenderer.newThanosRenderer() + mcoRenderer.newProxyRenderer() + return mcoRenderer +} + +func (r *MCORenderer) Render() ([]*unstructured.Unstructured, error) { + // load and render generic templates + genericTemplates, err := templates.GetOrLoadGenericTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + return nil, err + } + namespace := mcoconfig.GetDefaultNamespace() + labels := map[string]string{ + config.GetCrLabelKey(): r.cr.Name, + } + resources, err := r.renderer.RenderTemplates(genericTemplates, namespace, labels) + if err != nil { + return nil, err + } + + // load and render grafana templates + grafanaTemplates, err := templates.GetOrLoadGrafanaTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + return nil, err + } + grafanaResources, err := r.renderGrafanaTemplates(grafanaTemplates, namespace, labels) + if err != nil { + return nil, err + } + resources = append(resources, grafanaResources...) + + //load and render alertmanager templates + alertTemplates, err := templates.GetOrLoadAlertManagerTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + return nil, err + } + alertResources, err := r.renderAlertManagerTemplates(alertTemplates, namespace, labels) + if err != nil { + return nil, err + } + resources = append(resources, alertResources...) + + // load and render thanos templates + thanosTemplates, err := templates.GetOrLoadThanosTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + return nil, err + } + thanosResources, err := r.renderThanosTemplates(thanosTemplates, namespace, labels) + if err != nil { + return nil, err + } + resources = append(resources, thanosResources...) 
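+ // At this point the generic, Grafana, Alertmanager and Thanos groups have been
+ // rendered and appended; the proxy templates below complete the set. A rough
+ // sketch of the expected call site, which lives outside this hunk:
+ //
+ //   renderer := rendering.NewMCORenderer(mco)
+ //   manifests, err := renderer.Render()
+ //
+ // The reconciler then applies the returned unstructured manifests.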
+ + // load and render proxy templates + proxyTemplates, err := templates.GetOrLoadProxyTemplates(templatesutil.GetTemplateRenderer()) + if err != nil { + return nil, err + } + proxyResources, err := r.renderProxyTemplates(proxyTemplates, namespace, labels) + if err != nil { + return nil, err + } + resources = append(resources, proxyResources...) + + for idx := range resources { + if resources[idx].GetKind() == "Deployment" { + obj := util.GetK8sObj(resources[idx].GetKind()) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj) + if err != nil { + return nil, err + } + crLabelKey := config.GetCrLabelKey() + dep := obj.(*v1.Deployment) + dep.ObjectMeta.Labels[crLabelKey] = r.cr.Name + dep.Spec.Selector.MatchLabels[crLabelKey] = r.cr.Name + dep.Spec.Template.ObjectMeta.Labels[crLabelKey] = r.cr.Name + + spec := &dep.Spec.Template.Spec + spec.Containers[0].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + spec.NodeSelector = r.cr.Spec.NodeSelector + spec.Tolerations = r.cr.Spec.Tolerations + spec.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: mcoconfig.GetImagePullSecret(r.cr.Spec)}, + } + + switch resources[idx].GetName() { + + case "observatorium-operator": + spec.Containers[0].Image = mcoconfig.DefaultImgRepository + "/" + + mcoconfig.ObservatoriumOperatorImgName + ":" + mcoconfig.DefaultImgTagSuffix + + found, image := mcoconfig.ReplaceImage(r.cr.Annotations, spec.Containers[0].Image, + mcoconfig.ObservatoriumOperatorImgKey) + if found { + spec.Containers[0].Image = image + } + dep.Name = mcoconfig.GetOperandName(config.ObservatoriumOperator) + + } + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + + resources[idx].Object = unstructuredObj + } + } + + return resources, nil +} + +func (r *MCORenderer) renderMutatingWebhookConfiguration(res *resource.Resource) (*unstructured.Unstructured, error) { + u := &unstructured.Unstructured{Object: res.Map()} + webooks, ok := u.Object["webhooks"].([]interface{}) + if !ok { + return nil, fmt.Errorf("failed to find webhooks spec field") + } + webhook := webooks[0].(map[string]interface{}) + clientConfig := webhook["clientConfig"].(map[string]interface{}) + service := clientConfig["service"].(map[string]interface{}) + + service["namespace"] = mcoconfig.GetDefaultNamespace() + return u, nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go b/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go new file mode 100644 index 000000000..716f3a030 --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go @@ -0,0 +1,165 @@ +// Copyright (c) 2021 Red Hat, Inc. 
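Both the deployment post-processing in Render() above and the per-component renderers that follow rely on the same round trip: convert the unstructured resource into its typed object, mutate it, then convert it back. A stripped-down sketch of that pattern in isolation; the relabel helper and the demo Deployment are illustrative only and not part of this patch:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
    )

    // relabel converts an unstructured Deployment to its typed form, sets one
    // label, and writes the result back into the unstructured object.
    func relabel(u *unstructured.Unstructured, key, value string) error {
        dep := &appsv1.Deployment{}
        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, dep); err != nil {
            return err
        }
        if dep.Labels == nil {
            dep.Labels = map[string]string{}
        }
        dep.Labels[key] = value
        obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(dep)
        if err != nil {
            return err
        }
        u.Object = obj
        return nil
    }

    func main() {
        u := &unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "apps/v1",
            "kind":       "Deployment",
            "metadata":   map[string]interface{}{"name": "demo"},
        }}
        if err := relabel(u, "owner", "demo-cr"); err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(u.GetLabels()["owner"]) // demo-cr
    }
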
+// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "strconv" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiresource "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/kustomize/v3/pkg/resource" + + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +func (r *MCORenderer) newAlertManagerRenderer() { + r.renderAlertManagerFns = map[string]rendererutil.RenderFn{ + "StatefulSet": r.renderAlertManagerStatefulSet, + "Service": r.renderer.RenderNamespace, + "ServiceAccount": r.renderer.RenderNamespace, + "ConfigMap": r.renderer.RenderNamespace, + "ClusterRole": r.renderer.RenderClusterRole, + "ClusterRoleBinding": r.renderer.RenderClusterRoleBinding, + "Secret": r.renderAlertManagerSecret, + "Role": r.renderer.RenderNamespace, + "RoleBinding": r.renderer.RenderNamespace, + "Ingress": r.renderer.RenderNamespace, + "PersistentVolumeClaim": r.renderer.RenderNamespace, + } +} + +func (r *MCORenderer) renderAlertManagerStatefulSet(res *resource.Resource, + namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u, err := r.renderer.RenderNamespace(res, namespace, labels) + if err != nil { + return nil, err + } + obj := util.GetK8sObj(u.GetKind()) + err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) + if err != nil { + return nil, err + } + crLabelKey := mcoconfig.GetCrLabelKey() + dep := obj.(*v1.StatefulSet) + dep.ObjectMeta.Labels[crLabelKey] = r.cr.Name + dep.Spec.Selector.MatchLabels[crLabelKey] = r.cr.Name + dep.Spec.Template.ObjectMeta.Labels[crLabelKey] = r.cr.Name + dep.Name = mcoconfig.GetOperandName(mcoconfig.Alertmanager) + dep.Spec.Replicas = mcoconfig.GetReplicas(mcoconfig.Alertmanager, r.cr.Spec.AdvancedConfig) + + spec := &dep.Spec.Template.Spec + spec.Containers[0].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + args := spec.Containers[0].Args + + if *dep.Spec.Replicas > 1 { + for i := int32(0); i < *dep.Spec.Replicas; i++ { + args = append(args, "--cluster.peer="+ + mcoconfig.GetOperandNamePrefix()+"alertmanager-"+ + strconv.Itoa(int(i))+".alertmanager-operated."+ + mcoconfig.GetDefaultNamespace()+".svc:9094") + } + } + + spec.Containers[0].Args = args + spec.Containers[0].Resources = mcoconfig.GetResources(mcoconfig.Alertmanager, r.cr.Spec.AdvancedConfig) + + spec.Containers[1].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + spec.NodeSelector = r.cr.Spec.NodeSelector + spec.Tolerations = r.cr.Spec.Tolerations + spec.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: mcoconfig.GetImagePullSecret(r.cr.Spec)}, + } + + spec.Containers[0].Image = mcoconfig.DefaultImgRepository + "/" + mcoconfig.AlertManagerImgName + + ":" + mcoconfig.DefaultImgTagSuffix + //replace the alertmanager and config-reloader images + found, image := mcoconfig.ReplaceImage( + r.cr.Annotations, + mcoconfig.DefaultImgRepository+"/"+mcoconfig.AlertManagerImgName, + mcoconfig.AlertManagerImgKey) + if found { + spec.Containers[0].Image = image + } + + found, image = mcoconfig.ReplaceImage(r.cr.Annotations, mcoconfig.ConfigmapReloaderImgRepo, + mcoconfig.ConfigmapReloaderKey) + if found { + spec.Containers[1].Image = image + } + // 
the oauth-proxy image only exists in mch-image-manifest configmap + // pass nil annotation to make sure oauth-proxy overrided from mch-image-manifest + found, image = mcoconfig.ReplaceImage(nil, mcoconfig.OauthProxyImgRepo, + mcoconfig.OauthProxyKey) + if found { + spec.Containers[2].Image = image + } + //replace the volumeClaimTemplate + dep.Spec.VolumeClaimTemplates[0].Spec.StorageClassName = &r.cr.Spec.StorageConfig.StorageClass + dep.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = + apiresource.MustParse(r.cr.Spec.StorageConfig.AlertmanagerStorageSize) + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + + return &unstructured.Unstructured{Object: unstructuredObj}, nil +} + +func (r *MCORenderer) renderAlertManagerSecret(res *resource.Resource, + namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u, err := r.renderer.RenderNamespace(res, namespace, labels) + if err != nil { + return nil, err + } + + if u.GetName() == "alertmanager-proxy" { + obj := util.GetK8sObj(u.GetKind()) + err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) + if err != nil { + return nil, err + } + srt := obj.(*corev1.Secret) + p, err := util.GeneratePassword(43) + if err != nil { + return nil, err + } + srt.Data["session_secret"] = []byte(p) + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: unstructuredObj}, nil + } + + return u, nil +} + +func (r *MCORenderer) renderAlertManagerTemplates(templates []*resource.Resource, + namespace string, labels map[string]string) ([]*unstructured.Unstructured, error) { + uobjs := []*unstructured.Unstructured{} + for _, template := range templates { + render, ok := r.renderAlertManagerFns[template.GetKind()] + if !ok { + uobjs = append(uobjs, &unstructured.Unstructured{Object: template.Map()}) + continue + } + uobj, err := render(template.DeepCopy(), namespace, labels) + if err != nil { + return []*unstructured.Unstructured{}, err + } + if uobj == nil { + continue + } + uobjs = append(uobjs, uobj) + + } + + return uobjs, nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go b/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go new file mode 100644 index 000000000..f42b78c1e --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go @@ -0,0 +1,96 @@ +// Copyright (c) 2021 Red Hat, Inc. 
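When more than one Alertmanager replica is requested, the loop in renderAlertManagerStatefulSet above appends one --cluster.peer flag per ordinal so the pods gossip through the alertmanager-operated headless service. With three replicas, the default namespace open-cluster-management-observability and an operand name prefix of observability- (the prefix constant is defined outside this hunk), the generated flags come out roughly as:

    --cluster.peer=observability-alertmanager-0.alertmanager-operated.open-cluster-management-observability.svc:9094
    --cluster.peer=observability-alertmanager-1.alertmanager-operated.open-cluster-management-observability.svc:9094
    --cluster.peer=observability-alertmanager-2.alertmanager-operated.open-cluster-management-observability.svc:9094
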
+// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/kustomize/v3/pkg/resource" + + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +func (r *MCORenderer) newGranfanaRenderer() { + r.renderGrafanaFns = map[string]rendererutil.RenderFn{ + "Deployment": r.renderGrafanaDeployments, + "Service": r.renderer.RenderNamespace, + "ServiceAccount": r.renderer.RenderNamespace, + "ConfigMap": r.renderer.RenderNamespace, + "ClusterRole": r.renderer.RenderClusterRole, + "ClusterRoleBinding": r.renderer.RenderClusterRoleBinding, + "Secret": r.renderer.RenderNamespace, + "Role": r.renderer.RenderNamespace, + "RoleBinding": r.renderer.RenderNamespace, + "Ingress": r.renderer.RenderNamespace, + "PersistentVolumeClaim": r.renderer.RenderNamespace, + } +} + +func (r *MCORenderer) renderGrafanaDeployments(res *resource.Resource, + namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u, err := r.renderer.RenderDeployments(res, namespace, labels) + if err != nil { + return nil, err + } + + obj := util.GetK8sObj(u.GetKind()) + err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) + if err != nil { + return nil, err + } + dep := obj.(*v1.Deployment) + dep.Name = config.GetOperandName(config.Grafana) + dep.Spec.Replicas = config.GetReplicas(config.Grafana, r.cr.Spec.AdvancedConfig) + + spec := &dep.Spec.Template.Spec + + spec.Containers[0].Image = config.DefaultImgRepository + "/" + config.GrafanaImgKey + + ":" + config.DefaultImgTagSuffix + found, image := config.ReplaceImage(r.cr.Annotations, spec.Containers[0].Image, config.GrafanaImgKey) + if found { + spec.Containers[0].Image = image + } + spec.Containers[0].Resources = config.GetResources(config.Grafana, r.cr.Spec.AdvancedConfig) + + spec.Containers[1].Image = config.DefaultImgRepository + "/" + config.GrafanaDashboardLoaderName + + ":" + config.DefaultImgTagSuffix + found, image = config.ReplaceImage(r.cr.Annotations, spec.Containers[1].Image, + config.GrafanaDashboardLoaderKey) + if found { + spec.Containers[1].Image = image + } + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + + return &unstructured.Unstructured{Object: unstructuredObj}, nil +} + +func (r *MCORenderer) renderGrafanaTemplates(templates []*resource.Resource, + namespace string, labels map[string]string) ([]*unstructured.Unstructured, error) { + uobjs := []*unstructured.Unstructured{} + for _, template := range templates { + render, ok := r.renderGrafanaFns[template.GetKind()] + if !ok { + uobjs = append(uobjs, &unstructured.Unstructured{Object: template.Map()}) + continue + } + uobj, err := render(template.DeepCopy(), namespace, labels) + if err != nil { + return []*unstructured.Unstructured{}, err + } + if uobj == nil { + continue + } + uobjs = append(uobjs, uobj) + + } + + return uobjs, nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go b/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go new file mode 100644 index 000000000..2860925f7 --- /dev/null +++ 
b/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go @@ -0,0 +1,165 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "strings" + + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/kustomize/v3/pkg/resource" + + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +func (r *MCORenderer) newProxyRenderer() { + r.renderProxyFns = map[string]rendererutil.RenderFn{ + "Deployment": r.renderProxyDeployment, + "Service": r.renderer.RenderNamespace, + "ServiceAccount": r.renderer.RenderNamespace, + "ConfigMap": r.renderer.RenderNamespace, + "ClusterRole": r.renderer.RenderClusterRole, + "ClusterRoleBinding": r.renderer.RenderClusterRoleBinding, + "Secret": r.renderProxySecret, + "Role": r.renderer.RenderNamespace, + "RoleBinding": r.renderer.RenderNamespace, + "Ingress": r.renderer.RenderNamespace, + "PersistentVolumeClaim": r.renderer.RenderNamespace, + } +} + +func (r *MCORenderer) renderProxyDeployment(res *resource.Resource, + namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u, err := r.renderer.RenderDeployments(res, namespace, labels) + if err != nil { + return nil, err + } + obj := util.GetK8sObj(u.GetKind()) + err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) + if err != nil { + return nil, err + } + + crLabelKey := mcoconfig.GetCrLabelKey() + dep := obj.(*v1.Deployment) + dep.ObjectMeta.Labels[crLabelKey] = r.cr.Name + dep.Spec.Selector.MatchLabels[crLabelKey] = r.cr.Name + dep.Spec.Template.ObjectMeta.Labels[crLabelKey] = r.cr.Name + dep.Name = mcoconfig.GetOperandName(config.RBACQueryProxy) + dep.Spec.Replicas = config.GetReplicas(config.RBACQueryProxy, r.cr.Spec.AdvancedConfig) + + spec := &dep.Spec.Template.Spec + spec.Containers[0].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + args0 := spec.Containers[0].Args + for idx := range args0 { + args0[idx] = strings.Replace(args0[idx], "{{MCO_NAMESPACE}}", mcoconfig.GetDefaultNamespace(), 1) + args0[idx] = strings.Replace(args0[idx], "{{OBSERVATORIUM_NAME}}", mcoconfig.GetOperandName(mcoconfig.Observatorium), 1) + } + spec.Containers[0].Args = args0 + spec.Containers[0].Resources = mcoconfig.GetResources(mcoconfig.RBACQueryProxy, r.cr.Spec.AdvancedConfig) + + spec.Containers[1].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + args1 := spec.Containers[1].Args + for idx := range args1 { + args1[idx] = strings.Replace(args1[idx], "{{MCO_NAMESPACE}}", mcoconfig.GetDefaultNamespace(), 1) + } + spec.Containers[1].Args = args1 + spec.NodeSelector = r.cr.Spec.NodeSelector + spec.Tolerations = r.cr.Spec.Tolerations + spec.ImagePullSecrets = []corev1.LocalObjectReference{ + {Name: mcoconfig.GetImagePullSecret(r.cr.Spec)}, + } + + spec.Containers[0].Image = config.DefaultImgRepository + "/" + config.RBACQueryProxyImgName + + ":" + config.DefaultImgTagSuffix + //replace the proxy image + found, image := mcoconfig.ReplaceImage( + r.cr.Annotations, + spec.Containers[0].Image, + mcoconfig.RBACQueryProxyKey) 
+ if found { + spec.Containers[0].Image = image + } + + // the oauth-proxy image only exists in mch-image-manifest configmap + // pass nil annotation to make sure oauth-proxy overrided from mch-image-manifest + found, image = mcoconfig.ReplaceImage(nil, mcoconfig.OauthProxyImgRepo, + mcoconfig.OauthProxyKey) + if found { + spec.Containers[1].Image = image + } + + for idx := range spec.Volumes { + if spec.Volumes[idx].Name == "ca-certs" { + spec.Volumes[idx].Secret.SecretName = mcoconfig.ServerCerts + } + if spec.Volumes[idx].Name == "client-certs" { + spec.Volumes[idx].Secret.SecretName = mcoconfig.GrafanaCerts + } + } + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + + return &unstructured.Unstructured{Object: unstructuredObj}, nil +} + +func (r *MCORenderer) renderProxySecret(res *resource.Resource, + namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u, err := r.renderer.RenderNamespace(res, namespace, labels) + if err != nil { + return nil, err + } + + if u.GetName() == "rbac-proxy-cookie-secret" { + obj := util.GetK8sObj(u.GetKind()) + err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) + if err != nil { + return nil, err + } + srt := obj.(*corev1.Secret) + p, err := util.GeneratePassword(16) + if err != nil { + return nil, err + } + srt.Data["session_secret"] = []byte(p) + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: unstructuredObj}, nil + } + + return u, nil +} + +func (r *MCORenderer) renderProxyTemplates(templates []*resource.Resource, + namespace string, labels map[string]string) ([]*unstructured.Unstructured, error) { + uobjs := []*unstructured.Unstructured{} + for _, template := range templates { + render, ok := r.renderProxyFns[template.GetKind()] + if !ok { + uobjs = append(uobjs, &unstructured.Unstructured{Object: template.Map()}) + continue + } + uobj, err := render(template.DeepCopy(), namespace, labels) + if err != nil { + return []*unstructured.Unstructured{}, err + } + if uobj == nil { + continue + } + uobjs = append(uobjs, uobj) + + } + + return uobjs, nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_test.go b/operators/multiclusterobservability/pkg/rendering/renderer_test.go new file mode 100644 index 000000000..efcc6d01e --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/renderer_test.go @@ -0,0 +1,62 @@ +// Copyright (c) 2021 Red Hat, Inc. 
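The renderAlertManagerTemplates, renderGrafanaTemplates and renderProxyTemplates functions above all follow the same dispatch pattern: look up a per-Kind render function, fall back to passing the resource through unchanged when no function is registered, and drop resources whose renderer returns nil. The following is a minimal, self-contained sketch of that pattern; the Doc type and the renderFn signature are simplified stand-ins for the operator's resource.Resource and RenderFn types, not the real API. The Thanos renderer later in this patch reuses the same shape.

package main

import "fmt"

// Doc is a simplified stand-in for a manifest resource; only the Kind matters here.
type Doc struct {
	Kind string
	Name string
}

// renderFn mirrors the shape of the per-Kind render functions: it may rewrite
// the document, return it unchanged, or return nil to drop it.
type renderFn func(d Doc) *Doc

func renderAll(docs []Doc, fns map[string]renderFn) []Doc {
	out := []Doc{}
	for _, d := range docs {
		fn, ok := fns[d.Kind]
		if !ok {
			// no specialized renderer registered: keep the resource as-is
			out = append(out, d)
			continue
		}
		rendered := fn(d)
		if rendered == nil {
			// the renderer chose to skip this resource
			continue
		}
		out = append(out, *rendered)
	}
	return out
}

func main() {
	fns := map[string]renderFn{
		"Deployment": func(d Doc) *Doc { d.Name = d.Name + "-customized"; return &d },
		"Secret":     func(d Doc) *Doc { return nil }, // drop secrets in this toy example
	}
	docs := []Doc{
		{Kind: "Deployment", Name: "rbac-query-proxy"},
		{Kind: "Secret", Name: "proxy-cookie"},
		{Kind: "Service", Name: "proxy"},
	}
	fmt.Println(renderAll(docs, fns))
}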
+// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "os" + "path" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" +) + +func TestRender(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get working dir %v", err) + } + templatesPath := path.Join(path.Dir(path.Dir(wd)), "manifests") + os.Setenv(templatesutil.TemplatesPathEnvVar, templatesPath) + defer os.Unsetenv(templatesutil.TemplatesPathEnvVar) + + mchcr := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test"}, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + ImagePullPolicy: "Always", + ImagePullSecret: "test", + StorageConfig: &mcov1beta2.StorageConfig{ + MetricObjectStorage: &mcoshared.PreConfiguredStorage{ + Key: "test", + Name: "test", + }, + StorageClass: "gp2", + AlertmanagerStorageSize: "1Gi", + CompactStorageSize: "1Gi", + RuleStorageSize: "1Gi", + ReceiveStorageSize: "1Gi", + StoreStorageSize: "1Gi", + }, + }, + } + + renderer := NewMCORenderer(mchcr) + objs, err := renderer.Render() + if err != nil { + t.Fatalf("failed to render MultiClusterObservability: %v", err) + } + + printObjs(t, objs) +} + +func printObjs(t *testing.T, objs []*unstructured.Unstructured) { + for _, obj := range objs { + t.Log(obj) + } +} diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_thanos.go b/operators/multiclusterobservability/pkg/rendering/renderer_thanos.go new file mode 100644 index 000000000..84907536c --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/renderer_thanos.go @@ -0,0 +1,44 @@ +// Copyright (c) 2021 Red Hat, Inc. 
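TestRender above only logs the rendered objects, which checks that rendering does not error but asserts nothing about the output. If stronger checks are wanted, a small helper along the following lines could look up a rendered object by kind and name; findObj is a hypothetical name and the helper relies only on the unstructured accessors already used in this package.

package rendering

import (
	"testing"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// findObj is a hypothetical test helper: it scans the rendered objects for a
// kind/name pair and fails the test when the object is missing.
func findObj(t *testing.T, objs []*unstructured.Unstructured, kind, name string) *unstructured.Unstructured {
	t.Helper()
	for _, obj := range objs {
		if obj.GetKind() == kind && obj.GetName() == name {
			return obj
		}
	}
	t.Fatalf("rendered objects do not contain %s/%s", kind, name)
	return nil
}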
+// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/kustomize/v3/pkg/resource" + + rendererutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering" +) + +func (r *MCORenderer) newThanosRenderer() { + r.renderThanosFns = map[string]rendererutil.RenderFn{ + "ServiceAccount": r.renderer.RenderNamespace, + "ConfigMap": r.renderer.RenderNamespace, + "ClusterRole": r.renderer.RenderClusterRole, + "ClusterRoleBinding": r.renderer.RenderClusterRoleBinding, + "Secret": r.renderer.RenderNamespace, + } +} + +func (r *MCORenderer) renderThanosTemplates(templates []*resource.Resource, + namespace string, labels map[string]string) ([]*unstructured.Unstructured, error) { + uobjs := []*unstructured.Unstructured{} + for _, template := range templates { + render, ok := r.renderThanosFns[template.GetKind()] + if !ok { + uobjs = append(uobjs, &unstructured.Unstructured{Object: template.Map()}) + continue + } + uobj, err := render(template.DeepCopy(), namespace, labels) + if err != nil { + return []*unstructured.Unstructured{}, err + } + if uobj == nil { + continue + } + uobjs = append(uobjs, uobj) + + } + + return uobjs, nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/templates/templates.go b/operators/multiclusterobservability/pkg/rendering/templates/templates.go new file mode 100644 index 000000000..37c69abe1 --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/templates/templates.go @@ -0,0 +1,138 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package templates + +import ( + "path" + + "sigs.k8s.io/kustomize/v3/pkg/resource" + + "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" +) + +// *Templates contains all kustomize resources +var genericTemplates, grafanaTemplates, alertManagerTemplates, thanosTemplates, proxyTemplates, endpointObservabilityTemplates, prometheusTemplates []*resource.Resource + +// GetOrLoadGenericTemplates reads base manifest +func GetOrLoadGenericTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(genericTemplates) > 0 { + return genericTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "base") + + // add observatorium template + if err := r.AddTemplateFromPath(basePath+"/observatorium", &genericTemplates); err != nil { + return genericTemplates, err + } + + // add config template + if err := r.AddTemplateFromPath(basePath+"/config", &genericTemplates); err != nil { + return genericTemplates, err + } + + return genericTemplates, nil +} + +// GetOrLoadGrafanaTemplates reads the grafana manifests +func GetOrLoadGrafanaTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(grafanaTemplates) > 0 { + return grafanaTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "base") + + // add grafana template + if err := r.AddTemplateFromPath(basePath+"/grafana", &grafanaTemplates); err != nil { + return grafanaTemplates, err + } + return grafanaTemplates, nil +} + +// GetOrLoadAlertManagerTemplates reads the alertmanager manifests +func GetOrLoadAlertManagerTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(alertManagerTemplates) > 0 { + return alertManagerTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "base") + + // add alertmanager template + if err := 
r.AddTemplateFromPath(basePath+"/alertmanager", &alertManagerTemplates); err != nil { + return alertManagerTemplates, err + } + return alertManagerTemplates, nil +} + +// GetOrLoadThanosTemplates reads the thanos manifests +func GetOrLoadThanosTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(thanosTemplates) > 0 { + return thanosTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "base") + + // add thanos template + if err := r.AddTemplateFromPath(basePath+"/thanos", &thanosTemplates); err != nil { + return thanosTemplates, err + } + return thanosTemplates, nil +} + +// GetOrLoadProxyTemplates reads the rbac-query-proxy manifests +func GetOrLoadProxyTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(proxyTemplates) > 0 { + return proxyTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "base") + + // add rbac-query-proxy template + if err := r.AddTemplateFromPath(basePath+"/proxy", &proxyTemplates); err != nil { + return proxyTemplates, err + } + return proxyTemplates, nil +} + +// GetOrLoadEndpointObservabilityTemplates reads the endpoint-observability manifests +func GetOrLoadEndpointObservabilityTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(endpointObservabilityTemplates) > 0 { + return endpointObservabilityTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "endpoint-observability") + + // add endpoint observability template + if err := r.AddTemplateFromPath(basePath, &endpointObservabilityTemplates); err != nil { + return endpointObservabilityTemplates, err + } + + return endpointObservabilityTemplates, nil +} + +// GetOrLoadPrometheusTemplates reads the prometheus manifests +func GetOrLoadPrometheusTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { + if len(prometheusTemplates) > 0 { + return prometheusTemplates, nil + } + + basePath := path.Join(r.GetTemplatesPath(), "prometheus") + + // add prometheus template + if err := r.AddTemplateFromPath(basePath, &prometheusTemplates); err != nil { + return prometheusTemplates, err + } + + return prometheusTemplates, nil +} + +// ResetTemplates resets all the loaded templates +func ResetTemplates() { + genericTemplates = nil + grafanaTemplates = nil + alertManagerTemplates = nil + thanosTemplates = nil + proxyTemplates = nil + endpointObservabilityTemplates = nil +} diff --git a/operators/multiclusterobservability/pkg/rendering/templates/templates_test.go b/operators/multiclusterobservability/pkg/rendering/templates/templates_test.go new file mode 100644 index 000000000..c75d05aa4 --- /dev/null +++ b/operators/multiclusterobservability/pkg/rendering/templates/templates_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2021 Red Hat, Inc.
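The GetOrLoad* loaders above memoize their results in package-level slices, so repeated calls are cheap but stale data must be cleared explicitly with ResetTemplates. A usage sketch follows, assuming the environment variable named by templatesutil.TemplatesPathEnvVar points at the operator's manifests directory; the /usr/local/manifests path is only an example.

package main

import (
	"fmt"
	"os"

	mcotemplates "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering/templates"
	templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates"
)

func main() {
	// assumption: the manifests were copied to this path; adjust as needed
	os.Setenv(templatesutil.TemplatesPathEnvVar, "/usr/local/manifests")

	r := templatesutil.GetTemplateRenderer()

	// the first call walks the filesystem ...
	grafana, err := mcotemplates.GetOrLoadGrafanaTemplates(r)
	if err != nil {
		fmt.Println("failed to load grafana templates:", err)
		return
	}
	fmt.Println("grafana resources:", len(grafana))

	// ... subsequent calls return the cached slice until ResetTemplates is called
	_, _ = mcotemplates.GetOrLoadGrafanaTemplates(r)
	mcotemplates.ResetTemplates()
}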
+// Copyright Contributors to the Open Cluster Management project + +package templates + +import ( + "os" + "path" + "testing" + + templatesutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/rendering/templates" +) + +func TestGetCoreTemplates(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get working dir %v", err) + } + templatesPath := path.Join(path.Dir(path.Dir(path.Dir(wd))), "manifests") + os.Setenv(templatesutil.TemplatesPathEnvVar, templatesPath) + defer os.Unsetenv(templatesutil.TemplatesPathEnvVar) + + _, err = GetOrLoadGenericTemplates(templatesutil.GetTemplateRenderer()) + + if err != nil { + t.Fatalf("failed to render core template %v", err) + } +} diff --git a/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go b/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go new file mode 100644 index 000000000..45c82f11a --- /dev/null +++ b/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go @@ -0,0 +1,151 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package servicemonitor + +import ( + "context" + "os" + "reflect" + "time" + + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + promclientset "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/tools/cache" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +const ( + ocpMonitoringNamespace = "openshift-monitoring" + metricsNamePrefix = "acm_" +) + +var ( + log = logf.Log.WithName("sm_controller") + isSmControllerRunnning = false +) + +func Start() { + + if isSmControllerRunnning { + return + } + isSmControllerRunnning = true + + promClient, err := promclientset.NewForConfig(ctrl.GetConfigOrDie()) + if err != nil { + log.Error(err, "Failed to create prom client") + os.Exit(1) + } + watchlist := cache.NewListWatchFromClient(promClient.MonitoringV1().RESTClient(), "servicemonitors", config.GetDefaultNamespace(), + fields.Everything()) + _, controller := cache.NewInformer( + watchlist, + &promv1.ServiceMonitor{}, + time.Minute*60, + cache.ResourceEventHandlerFuncs{ + AddFunc: onAdd(promClient), + DeleteFunc: onDelete(promClient), + UpdateFunc: onUpdate(promClient), + }, + ) + + stop := make(chan struct{}) + go controller.Run(stop) +} + +func onAdd(promClient promclientset.Interface) func(obj interface{}) { + return func(obj interface{}) { + sm := obj.(*promv1.ServiceMonitor) + if sm.ObjectMeta.OwnerReferences != nil && sm.ObjectMeta.OwnerReferences[0].Kind == "Observatorium" { + updateServiceMonitor(promClient, sm) + } + } +} + +func onDelete(promClient promclientset.Interface) func(obj interface{}) { + return func(obj interface{}) { + sm := obj.(*promv1.ServiceMonitor) + if sm.ObjectMeta.OwnerReferences != nil && sm.ObjectMeta.OwnerReferences[0].Kind == "Observatorium" { + err := promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Delete(context.TODO(), sm.Name, metav1.DeleteOptions{}) + if err != nil { + log.Error(err, "Failed to delete ServiceMonitor", "namespace", ocpMonitoringNamespace, "name", sm.Name) + } else { + log.Info("ServiceMonitor Deleted", "namespace", ocpMonitoringNamespace, "name", 
sm.Name) + } + } + } +} + +func onUpdate(promClient promclientset.Interface) func(newObj interface{}, oldObj interface{}) { + return func(newObj interface{}, oldObj interface{}) { + newSm := newObj.(*promv1.ServiceMonitor) + oldSm := oldObj.(*promv1.ServiceMonitor) + if newSm.ObjectMeta.OwnerReferences != nil && newSm.ObjectMeta.OwnerReferences[0].Kind == "Observatorium" && + !reflect.DeepEqual(newSm.Spec, oldSm.Spec) { + updateServiceMonitor(promClient, newSm) + } + } +} + +func updateServiceMonitor(promClient promclientset.Interface, sm *promv1.ServiceMonitor) { + found, err := promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Get(context.TODO(), sm.Name, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + _, err := promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Create(context.TODO(), + rewriteLabels(sm, ""), metav1.CreateOptions{}) + if err != nil { + log.Error(err, "Failed to create ServiceMonitor", "namespace", ocpMonitoringNamespace, "name", sm.Name) + } else { + log.Info("ServiceMonitor Created", "namespace", ocpMonitoringNamespace, "name", sm.Name) + } + } else { + log.Error(err, "Failed to check ServiceMonitor", "namespace", ocpMonitoringNamespace, "name", sm.Name) + } + return + } + _, err = promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Update(context.TODO(), + rewriteLabels(sm, found.ResourceVersion), metav1.UpdateOptions{}) + if err != nil { + log.Error(err, "Failed to update ServiceMonitor", "namespace", ocpMonitoringNamespace, "name", sm.Name) + } else { + log.Info("ServiceMonitor Updated", "namespace", ocpMonitoringNamespace, "name", sm.Name) + } +} + +func rewriteLabels(sm *promv1.ServiceMonitor, resourceVersion string) *promv1.ServiceMonitor { + update := &promv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: sm.Name, + Namespace: ocpMonitoringNamespace, + }, + } + endpoints := []promv1.Endpoint{} + for _, endpoint := range sm.Spec.Endpoints { + metricsRelabels := endpoint.MetricRelabelConfigs + if metricsRelabels == nil { + metricsRelabels = []*promv1.RelabelConfig{} + } + metricsRelabels = append(metricsRelabels, &promv1.RelabelConfig{ + SourceLabels: []string{"__name__"}, + Regex: "(.+)", + TargetLabel: "__name__", + Replacement: metricsNamePrefix + "${1}", + }) + endpoint.MetricRelabelConfigs = metricsRelabels + endpoints = append(endpoints, endpoint) + } + sm.Spec.Endpoints = endpoints + sm.Spec.NamespaceSelector = promv1.NamespaceSelector{ + MatchNames: []string{config.GetDefaultNamespace()}, + } + update.Spec = sm.Spec + update.ResourceVersion = resourceVersion + return update +} diff --git a/operators/multiclusterobservability/pkg/servicemonitor/sm_controller_test.go b/operators/multiclusterobservability/pkg/servicemonitor/sm_controller_test.go new file mode 100644 index 000000000..f7376a9ef --- /dev/null +++ b/operators/multiclusterobservability/pkg/servicemonitor/sm_controller_test.go @@ -0,0 +1,35 @@ +// Copyright (c) 2021 Red Hat, Inc. 
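rewriteLabels above prefixes every forwarded metric name with acm_ by appending a metric relabel rule to each endpoint (source label __name__, regex (.+), replacement acm_${1}, where acm_ comes from metricsNamePrefix). Prometheus performs the real rewrite; the stdlib sketch below only reproduces the substitution so the effect on concrete metric names is easy to see, and the sample metric names are illustrative.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// the same pattern and replacement used in the appended relabel rule
	re := regexp.MustCompile("(.+)")
	replacement := "acm_${1}" // metricsNamePrefix + "${1}"

	for _, name := range []string{"up", "observatorium_api_request_duration_seconds"} {
		fmt.Printf("%s -> %s\n", name, re.ReplaceAllString(name, replacement))
	}
}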
+// Copyright Contributors to the Open Cluster Management project + +package servicemonitor + +import ( + "testing" + + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestRewriteLabels(t *testing.T) { + sm := &promv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: ocpMonitoringNamespace, + }, + Spec: promv1.ServiceMonitorSpec{ + Endpoints: []promv1.Endpoint{ + { + Path: "test", + }, + }, + }, + } + updated := rewriteLabels(sm, "") + if len(updated.Spec.NamespaceSelector.MatchNames) == 0 || updated.Spec.NamespaceSelector.MatchNames[0] != config.GetDefaultNamespace() { + t.Errorf("Wrong NamespaceSelector: %v", updated.Spec.NamespaceSelector) + } + if len(updated.Spec.Endpoints[0].MetricRelabelConfigs) != 1 { + t.Errorf("Wrong MetricRelabelConfigs: %v", updated.Spec.Endpoints[0].MetricRelabelConfigs) + } +} diff --git a/operators/multiclusterobservability/pkg/util/client.go b/operators/multiclusterobservability/pkg/util/client.go new file mode 100644 index 000000000..de9c5b7b9 --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/client.go @@ -0,0 +1,161 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "context" + "fmt" + + ocpClientSet "github.com/openshift/client-go/config/clientset/versioned" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + crdClientSet "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +var log = logf.Log.WithName("util") + +var ( + kubeClient kubernetes.Interface + crdClient crdClientSet.Interface + ocpClient ocpClientSet.Interface +) + +// GetOrCreateKubeClient gets existing kubeclient or creates new one if it doesn't exist +func GetOrCreateKubeClient() (kubernetes.Interface, error) { + if kubeClient != nil { + return kubeClient, nil + } + // create the config from the path + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + log.Error(err, "Failed to create the config") + return nil, err + } + + // generate the client based off of the config + kubeClient, err = kubernetes.NewForConfig(config) + if err != nil { + log.Error(err, "Failed to create kube client") + return nil, err + } + + return kubeClient, nil +} + +// GetOrCreateOCPClient creates ocp client +func GetOrCreateOCPClient() (ocpClientSet.Interface, error) { + if crdClient != nil { + return ocpClient, nil + } + // create the config from the path + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + log.Error(err, "Failed to create the config") + return nil, err + } + + // generate the client based off of the config + ocpClient, err = ocpClientSet.NewForConfig(config) + if err != nil { + log.Error(err, "Failed to create ocp config client") + return nil, err + } + + return ocpClient, err +} + +// GetOrCreateCRDClient gets an existing or creates a new CRD client +func GetOrCreateCRDClient() (crdClientSet.Interface, error) { + if crdClient != nil { + 
return crdClient, nil + } + // create the config from the path + config, err := clientcmd.BuildConfigFromFlags("", "") + if err != nil { + log.Error(err, "Failed to create the config") + return nil, err + } + + // generate the client based off of the config + crdClient, err = crdClientSet.NewForConfig(config) + if err != nil { + log.Error(err, "Failed to create CRD config client") + return nil, err + } + + return crdClient, err +} + +func CheckCRDExist(crdClient crdClientSet.Interface, crdName string) (bool, error) { + _, err := crdClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crdName, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + log.Info("unable to get CRD with ApiextensionsV1 Client, not found.", "CRD", crdName) + return false, nil + } + log.Error(err, "failed to get CRD with ApiextensionsV1 Client", "CRD", crdName) + return false, err + } + return true, nil +} + +func UpdateCRDWebhookNS(crdClient crdClientSet.Interface, namespace, crdName string) error { + crdObj, err := crdClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crdName, metav1.GetOptions{}) + if err != nil { + log.Error(err, "failed to get CRD", "CRD", crdName) + return err + } + if crdObj.Spec.Conversion == nil || crdObj.Spec.Conversion.Webhook == nil || crdObj.Spec.Conversion.Webhook.ClientConfig == nil { + log.Error(err, "empty Conversion in the CRD", "CRD", crdName) + return fmt.Errorf("empty Conversion in the CRD %s", crdName) + } + if crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace != namespace { + log.Info("updating the webhook service namespace", "original namespace", crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace, "new namespace", namespace) + crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace = namespace + _, err := crdClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crdObj, metav1.UpdateOptions{}) + if err != nil { + log.Error(err, "failed to update webhook service namespace") + return err + } + } + return nil +} + +// GetPVCList get pvc with matched labels +func GetPVCList(c client.Client, matchLabels map[string]string) ([]corev1.PersistentVolumeClaim, error) { + pvcList := &corev1.PersistentVolumeClaimList{} + pvcListOpts := []client.ListOption{ + client.InNamespace(config.GetDefaultNamespace()), + client.MatchingLabels(matchLabels), + } + + err := c.List(context.TODO(), pvcList, pvcListOpts...) + if err != nil { + return nil, err + } + return pvcList.Items, nil +} + +// GetStatefulSetList get sts with matched labels +func GetStatefulSetList(c client.Client, matchLabels map[string]string) ([]appsv1.StatefulSet, error) { + stsList := &appsv1.StatefulSetList{} + stsListOpts := []client.ListOption{ + client.InNamespace(config.GetDefaultNamespace()), + client.MatchingLabels(matchLabels), + } + + err := c.List(context.TODO(), stsList, stsListOpts...) + if err != nil { + return nil, err + } + return stsList.Items, nil +} diff --git a/operators/multiclusterobservability/pkg/util/clustermanagementaddon.go b/operators/multiclusterobservability/pkg/util/clustermanagementaddon.go new file mode 100644 index 000000000..311f54584 --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/clustermanagementaddon.go @@ -0,0 +1,105 @@ +// Copyright (c) 2021 Red Hat, Inc. 
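The helpers in client.go above are meant to be called from reconcile logic. The function below is a hypothetical example of combining them: it verifies that the MultiClusterObservability CRD is installed and then lists PVCs by label. The reportObservabilityStorage name and the label selector are illustrative, not part of the operator.

package util

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reportObservabilityStorage is a hypothetical example of how the helpers in
// this file can be combined in reconcile code.
func reportObservabilityStorage(c client.Client) error {
	crdClient, err := GetOrCreateCRDClient()
	if err != nil {
		return err
	}

	exists, err := CheckCRDExist(crdClient, "multiclusterobservabilities.observability.open-cluster-management.io")
	if err != nil {
		return err
	}
	if !exists {
		return fmt.Errorf("the MultiClusterObservability CRD is not installed")
	}

	// example label selector only; the real components use their own labels
	pvcs, err := GetPVCList(c, map[string]string{"app": "multicluster-observability"})
	if err != nil {
		return err
	}
	for _, pvc := range pvcs {
		fmt.Printf("PVC %s requests %v\n", pvc.Name, pvc.Spec.Resources.Requests)
	}
	return nil
}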
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +const ( + ObservabilityController = "observability-controller" + grafanaLink = "/grafana/d/2b679d600f3b9e7676a7c5ac3643d448/acm-clusters-overview" +) + +type clusterManagementAddOnSpec struct { + DisplayName string `json:"displayName"` + Description string `json:"description"` + CRDName string `json:"crdName"` +} + +func CreateClusterManagementAddon(c client.Client) error { + clusterManagementAddon := newClusterManagementAddon() + found := &addonv1alpha1.ClusterManagementAddOn{} + err := c.Get(context.TODO(), types.NamespacedName{Name: ObservabilityController}, found) + if err != nil && errors.IsNotFound(err) { + if err := c.Create(context.TODO(), clusterManagementAddon); err != nil { + log.Error(err, "Failed to create observability-controller clustermanagementaddon ") + return err + } + log.Info("Created observability-controller clustermanagementaddon") + return nil + } else if err != nil { + log.Error(err, "Cannot create observability-controller clustermanagementaddon") + return err + } + + if !reflect.DeepEqual(found.Spec, clusterManagementAddon.Spec) || + !reflect.DeepEqual(found.ObjectMeta.Annotations, clusterManagementAddon.ObjectMeta.Annotations) { + log.Info("Updating observability-controller clustermanagementaddon") + clusterManagementAddon.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = c.Update(context.TODO(), clusterManagementAddon) + if err != nil { + log.Error(err, "Failed to update observability-controller clustermanagementaddon") + return err + } + return nil + } + + log.Info(fmt.Sprintf("%s clustermanagementaddon is present ", ObservabilityController)) + return nil +} + +func DeleteClusterManagementAddon(client client.Client) error { + clustermanagementaddon := &addonv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: ObservabilityController, + }, + } + err := client.Delete(context.TODO(), clustermanagementaddon) + if err != nil && !errors.IsNotFound(err) { + log.Error(err, "Failed to delete clustermanagementaddon", "name", ObservabilityController) + return err + } + log.Info("ClusterManagementAddon deleted", "name", ObservabilityController) + return nil +} + +func newClusterManagementAddon() *addonv1alpha1.ClusterManagementAddOn { + clusterManagementAddOnSpec := clusterManagementAddOnSpec{ + DisplayName: "Observability Controller", + Description: "Manages Observability components.", + CRDName: "observabilityaddons.observability.open-cluster-management.io", + } + return &addonv1alpha1.ClusterManagementAddOn{ + TypeMeta: metav1.TypeMeta{ + APIVersion: addonv1alpha1.SchemeGroupVersion.String(), + Kind: "ClusterManagementAddOn", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: ObservabilityController, + Annotations: map[string]string{ + "console.open-cluster-management.io/launch-link": grafanaLink, + "console.open-cluster-management.io/launch-link-text": "Grafana", + }, + }, + Spec: addonv1alpha1.ClusterManagementAddOnSpec{ + AddOnMeta: addonv1alpha1.AddOnMeta{ + DisplayName: clusterManagementAddOnSpec.DisplayName, + Description: clusterManagementAddOnSpec.Description, + }, + AddOnConfiguration: addonv1alpha1.ConfigCoordinates{ + CRDName: clusterManagementAddOnSpec.CRDName, + 
}, + }, + } +} diff --git a/operators/multiclusterobservability/pkg/util/clustermanagementaddon_test.go b/operators/multiclusterobservability/pkg/util/clustermanagementaddon_test.go new file mode 100644 index 000000000..8998d65d5 --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/clustermanagementaddon_test.go @@ -0,0 +1,64 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +func TestClusterManagmentAddon(t *testing.T) { + s := scheme.Scheme + addonv1alpha1.AddToScheme(s) + c := fake.NewFakeClient() + err := CreateClusterManagementAddon(c) + if err != nil { + t.Fatalf("Failed to create clustermanagementaddon: (%v)", err) + } + err = CreateClusterManagementAddon(c) + if err != nil { + t.Fatalf("Failed to create clustermanagementaddon twice: (%v)", err) + } + addon := &addonv1alpha1.ClusterManagementAddOn{} + err = c.Get(context.TODO(), + types.NamespacedName{ + Name: ObservabilityController, + }, + addon, + ) + if err != nil { + t.Fatalf("Failed to get clustermanagementaddon: (%v)", err) + } + if addon.Spec.AddOnConfiguration.CRDName != "observabilityaddons.observability.open-cluster-management.io" { + t.Fatalf("Wrong CRD name included: %s", addon.Spec.AddOnConfiguration.CRDName) + } + if linkTxt, found := addon.ObjectMeta.Annotations["console.open-cluster-management.io/launch-link-text"]; found == false { + t.Fatalf("No launch-link-text annotation included") + } else { + if linkTxt != "Grafana" { + t.Fatalf("Wrong launch-link-text annotation: %s", linkTxt) + } + } + + err = DeleteClusterManagementAddon(c) + if err != nil { + t.Fatalf("Failed to delete clustermanagementaddon: (%v)", err) + } + err = c.Get(context.TODO(), + types.NamespacedName{ + Name: ObservabilityController, + }, + addon, + ) + if err == nil || !errors.IsNotFound(err) { + t.Fatalf("Failed to delete clustermanagementaddon: (%v)", err) + } +} diff --git a/operators/multiclusterobservability/pkg/util/managedclusteraddon.go b/operators/multiclusterobservability/pkg/util/managedclusteraddon.go new file mode 100644 index 000000000..4b59fa2c6 --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/managedclusteraddon.go @@ -0,0 +1,132 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "context" + "os" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + ManagedClusterAddonName = "observability-controller" +) + +var ( + spokeNameSpace = os.Getenv("SPOKE_NAMESPACE") +) + +func CreateManagedClusterAddonCR(c client.Client, namespace, labelKey, labelValue string) error { + newManagedClusterAddon := &addonv1alpha1.ManagedClusterAddOn{ + TypeMeta: metav1.TypeMeta{ + APIVersion: addonv1alpha1.SchemeGroupVersion.String(), + Kind: "ManagedClusterAddOn", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: ManagedClusterAddonName, + Namespace: namespace, + Labels: map[string]string{ + labelKey: labelValue, + }, + }, + Spec: addonv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: spokeNameSpace, + }, + } + managedClusterAddon := &addonv1alpha1.ManagedClusterAddOn{} + // check if managedClusterAddon exists + if err := c.Get( + context.TODO(), + types.NamespacedName{ + Name: ManagedClusterAddonName, + Namespace: namespace, + }, + managedClusterAddon, + ); err != nil && errors.IsNotFound(err) { + // create new managedClusterAddon + if err := c.Create(context.TODO(), newManagedClusterAddon); err != nil { + log.Error(err, "failed to create managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + return err + } + + // wait 10s for the created managedclusteraddon ready + if errPoll := wait.Poll(2*time.Second, 10*time.Second, func() (bool, error) { + if err := c.Get( + context.TODO(), + types.NamespacedName{ + Name: ManagedClusterAddonName, + Namespace: namespace, + }, + managedClusterAddon, + ); err == nil { + return true, nil + } + return false, err + }); errPoll != nil { + log.Error(errPoll, "failed to get the created managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + return errPoll + } + + // got the created managedclusteraddon just now, uopdating its status + managedClusterAddon.Status.AddOnConfiguration = addonv1alpha1.ConfigCoordinates{ + CRDName: "observabilityaddons.observability.open-cluster-management.io", + CRName: "observability-addon", + } + managedClusterAddon.Status.AddOnMeta = addonv1alpha1.AddOnMeta{ + DisplayName: "Observability Controller", + Description: "Manages Observability components.", + } + if len(managedClusterAddon.Status.Conditions) > 0 { + managedClusterAddon.Status.Conditions = append(managedClusterAddon.Status.Conditions, metav1.Condition{ + Type: "Progressing", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(time.Now()), + Reason: "ManifestWorkCreated", + Message: "Addon Installing", + }) + } else { + managedClusterAddon.Status.Conditions = []metav1.Condition{ + { + Type: "Progressing", + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(time.Now()), + Reason: "ManifestWorkCreated", + Message: "Addon Installing", + }, + } + } + // update status for the created managedclusteraddon + if err := c.Status().Update(context.TODO(), managedClusterAddon); err != nil { + log.Error(err, "failed to update status for managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + return err + } + return nil + } else if err != nil { + log.Error(err, "failed to get managedclusteraddon", "name", ManagedClusterAddonName, 
"namespace", namespace) + return err + } + + // managedclusteraddon already exists, updating... + if !reflect.DeepEqual(managedClusterAddon.Spec, newManagedClusterAddon.Spec) { + log.Info("found difference, updating managedClusterAddon", "name", ManagedClusterAddonName, "namespace", namespace) + newManagedClusterAddon.ObjectMeta.ResourceVersion = managedClusterAddon.ObjectMeta.ResourceVersion + err := c.Update(context.TODO(), newManagedClusterAddon) + if err != nil { + log.Error(err, "failed to update managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + return err + } + return nil + } + + log.Info("ManagedClusterAddOn is created or updated successfully", "name", ManagedClusterAddonName, "namespace", namespace) + return nil +} diff --git a/operators/multiclusterobservability/pkg/util/managedclusteraddon_test.go b/operators/multiclusterobservability/pkg/util/managedclusteraddon_test.go new file mode 100644 index 000000000..9fa046d8f --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/managedclusteraddon_test.go @@ -0,0 +1,41 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +const ( + name = "test" + namespace = "test" +) + +func TestManagedClusterAddon(t *testing.T) { + s := scheme.Scheme + addonv1alpha1.AddToScheme(s) + c := fake.NewFakeClient() + err := CreateManagedClusterAddonCR(c, namespace, "testKey", "value") + if err != nil { + t.Fatalf("Failed to create managedclusteraddon: (%v)", err) + } + addon := &addonv1alpha1.ManagedClusterAddOn{} + err = c.Get(context.TODO(), + types.NamespacedName{ + Name: ManagedClusterAddonName, + Namespace: namespace, + }, + addon, + ) + if err != nil { + t.Fatalf("Failed to get managedclusteraddon: (%v)", err) + } +} diff --git a/operators/multiclusterobservability/pkg/webhook/webhook_controller.go b/operators/multiclusterobservability/pkg/webhook/webhook_controller.go new file mode 100644 index 000000000..4800ac902 --- /dev/null +++ b/operators/multiclusterobservability/pkg/webhook/webhook_controller.go @@ -0,0 +1,130 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package webhook + +import ( + "context" + "reflect" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" +) + +var log = logf.Log.WithName("webhook-controller") + +// WebhookController define the controller that manages(create, update and delete) the webhook configurations. +// it implements the Runnable interface from https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/manager#Runnable +type WebhookController struct { + client client.Client + mutatingWebhook *admissionregistrationv1.MutatingWebhookConfiguration + validatingWebhook *admissionregistrationv1.ValidatingWebhookConfiguration +} + +// NewWebhookController create the WebhookController. 
+func NewWebhookController(client client.Client, mwh *admissionregistrationv1.MutatingWebhookConfiguration, vwh *admissionregistrationv1.ValidatingWebhookConfiguration) *WebhookController { + return &WebhookController{ + client: client, + mutatingWebhook: mwh, + validatingWebhook: vwh, + } +} + +// Start runs the WebhookController with the given context. +// it will create the corresponding webhook configuration with the client +// at starting and remove it when context done singal is received +// currently the controller will not watch the change of the webhook configurations. +func (wc *WebhookController) Start(ctx context.Context) error { + if wc.mutatingWebhook != nil { + log.V(1).Info("creating or updating the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) + foundMwhc := &admissionregistrationv1.MutatingWebhookConfiguration{} + if err := wc.client.Get(context.TODO(), types.NamespacedName{Name: wc.mutatingWebhook.GetName()}, foundMwhc); err != nil && apierrors.IsNotFound(err) { + if err := wc.client.Create(context.TODO(), wc.mutatingWebhook); err != nil { + log.V(1).Info("failed to create the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), "error", err) + return err + } + log.V(1).Info("the mutatingwebhookconfiguration is created", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) + } else if err != nil { + log.V(1).Info("failed to check the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), "error", err) + return err + } else { + // there is an existing mutatingWebhookConfiguration + if len(foundMwhc.Webhooks) != len(wc.mutatingWebhook.Webhooks) || + !(foundMwhc.Webhooks[0].Name == wc.mutatingWebhook.Webhooks[0].Name && + reflect.DeepEqual(foundMwhc.Webhooks[0].AdmissionReviewVersions, wc.mutatingWebhook.Webhooks[0].AdmissionReviewVersions) && + reflect.DeepEqual(foundMwhc.Webhooks[0].Rules, wc.mutatingWebhook.Webhooks[0].Rules) && + reflect.DeepEqual(foundMwhc.Webhooks[0].ClientConfig.Service, wc.mutatingWebhook.Webhooks[0].ClientConfig.Service)) { + wc.mutatingWebhook.ObjectMeta.ResourceVersion = foundMwhc.ObjectMeta.ResourceVersion + + err := wc.client.Update(context.TODO(), wc.mutatingWebhook) + if err != nil { + log.V(1).Info("failed to update the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), "error", err) + return err + } + log.V(1).Info("the mutatingwebhookconfiguration is updated", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) + } + log.V(1).Info("the mutatingwebhookconfiguration already exists and no change", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) + } + } + + if wc.validatingWebhook != nil { + log.V(1).Info("creating or updating the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) + foundVwhc := &admissionregistrationv1.ValidatingWebhookConfiguration{} + if err := wc.client.Get(context.TODO(), types.NamespacedName{Name: wc.validatingWebhook.GetName()}, foundVwhc); err != nil && apierrors.IsNotFound(err) { + if err := wc.client.Create(context.TODO(), wc.validatingWebhook); err != nil { + log.V(1).Info("failed to create the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName(), "error", err) + return err + } + log.V(1).Info("the validatingwebhookconfiguration is created", "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) + } else if err != nil 
{ + log.V(1).Info("failed to check the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName(), "error", err) + return err + } else { + // there is an existing validatingwebhookconfiguration + if len(foundVwhc.Webhooks) != len(wc.validatingWebhook.Webhooks) || + !(foundVwhc.Webhooks[0].Name == wc.validatingWebhook.Webhooks[0].Name && + reflect.DeepEqual(foundVwhc.Webhooks[0].AdmissionReviewVersions, wc.validatingWebhook.Webhooks[0].AdmissionReviewVersions) && + reflect.DeepEqual(foundVwhc.Webhooks[0].Rules, wc.validatingWebhook.Webhooks[0].Rules) && + reflect.DeepEqual(foundVwhc.Webhooks[0].ClientConfig.Service, wc.validatingWebhook.Webhooks[0].ClientConfig.Service)) { + wc.validatingWebhook.ObjectMeta.ResourceVersion = foundVwhc.ObjectMeta.ResourceVersion + + err := wc.client.Update(context.TODO(), wc.validatingWebhook) + if err != nil { + log.V(1).Info("failed to update the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName(), "error", err) + return err + } + log.V(1).Info("the validatingwebhookconfiguration is updated", "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) + } + log.V(1).Info("the validatingwebhookconfiguration already exists and no change", "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) + } + } + + // wait for context done signal + <-ctx.Done() + + // currently kubernetes prevents terminating pod from deleting kubernetes resources(including validatingwebhookconfiguration...), see: https://kubernetes.io/blog/2021/05/14/using-finalizers-to-control-deletion/ + // that's why the deleting webhook configuration code is commented + /* + log.V(1).Info("Shutdown signal received, waiting for the webhook cleanup.") + if wc.mutatingWebhook != nil { + // delete the mutatingwebhookconfiguration and ignore error + err := wc.client.Delete(context.TODO(), wc.mutatingWebhook, &client.DeleteOptions{}) + if err != nil { + log.V(1).Info("error to delete the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), "error", err) + } + } + if wc.validatingWebhook != nil { + // delete the validatingwebhookconfiguration and ignore error + err := wc.client.Delete(context.TODO(), wc.validatingWebhook, &client.DeleteOptions{}) + if err != nil { + log.V(1).Info("error to delete the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName(), "error", err) + } + } + log.V(1).Info("webhook clean up") + */ + + return nil +} diff --git a/operators/multiclusterobservability/pkg/webhook/webhook_controller_test.go b/operators/multiclusterobservability/pkg/webhook/webhook_controller_test.go new file mode 100644 index 000000000..e6b2512a1 --- /dev/null +++ b/operators/multiclusterobservability/pkg/webhook/webhook_controller_test.go @@ -0,0 +1,176 @@ +// Copyright (c) 2021 Red Hat, Inc. 
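Because WebhookController above implements the controller-runtime Runnable interface, it is normally handed to a Manager rather than started by hand. The wiring sketch below shows that; the ValidatingWebhookConfiguration is a placeholder (its name matches the one deleted by prestop.sh later in this patch, but the webhook entry itself is illustrative) and error handling is reduced to os.Exit.

package main

import (
	"os"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/webhook"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}

	// placeholder configuration: in the operator this object carries the real
	// webhook service reference and rules
	sideEffects := admissionregistrationv1.SideEffectClassNone
	vwh := &admissionregistrationv1.ValidatingWebhookConfiguration{
		ObjectMeta: metav1.ObjectMeta{Name: "multicluster-observability-operator"},
		Webhooks: []admissionregistrationv1.ValidatingWebhook{{
			Name:                    "vmulticlusterobservability.observability.open-cluster-management.io", // example name
			AdmissionReviewVersions: []string{"v1"},
			SideEffects:             &sideEffects,
			// ClientConfig, Rules, etc. omitted: this is only a wiring sketch
		}},
	}

	// no mutating webhook in this sketch, so pass nil for it
	wc := webhook.NewWebhookController(mgr.GetClient(), nil, vwh)

	// the manager starts the controller with its other runnables and cancels
	// the controller's context on shutdown
	if err := mgr.Add(wc); err != nil {
		os.Exit(1)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}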
+// Copyright Contributors to the Open Cluster Management project + +package webhook + +import ( + "context" + "reflect" + "testing" + "time" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestWebhookController(t *testing.T) { + testValidatingWebhookPath := "/validate-testing" + testMutatingWebhookPath := "/mutate-testing" + noSideEffects := admissionregistrationv1.SideEffectClassNone + allScopeType := admissionregistrationv1.AllScopes + webhookServicePort := int32(443) + testmwh := &admissionregistrationv1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testingmwh", + Labels: map[string]string{ + "name": "testingmwh", + }, + }, + Webhooks: []admissionregistrationv1.MutatingWebhook{ + { + AdmissionReviewVersions: []string{"v1", "v1beta1"}, + Name: "testingmwhook", + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: "testing-webhook-service", + Namespace: "testing-webhook-service-namespace", + Path: &testValidatingWebhookPath, + Port: &webhookServicePort, + }, + CABundle: []byte(""), + }, + SideEffects: &noSideEffects, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{"observability.open-cluster-management.io"}, + APIVersions: []string{"v1beta2"}, + Resources: []string{"multiclusterobservabilities"}, + Scope: &allScopeType, + }, + }, + }, + }, + }, + } + testvwh := &admissionregistrationv1.ValidatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testingvwh", + Labels: map[string]string{ + "name": "testingvwh", + }, + }, + Webhooks: []admissionregistrationv1.ValidatingWebhook{ + { + AdmissionReviewVersions: []string{"v1", "v1beta1"}, + Name: "testingvwhook", + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: "testing-webhook-service", + Namespace: "testing-webhook-service-namespace", + Path: &testMutatingWebhookPath, + Port: &webhookServicePort, + }, + CABundle: []byte(""), + }, + SideEffects: &noSideEffects, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + admissionregistrationv1.Update, + }, + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{"observability.open-cluster-management.io"}, + APIVersions: []string{"v1beta2"}, + Resources: []string{"multiclusterobservabilities"}, + Scope: &allScopeType, + }, + }, + }, + }, + }, + } + cases := []struct { + name string + existingmwh *admissionregistrationv1.MutatingWebhookConfiguration + existingvwh *admissionregistrationv1.ValidatingWebhookConfiguration + reconciledmwh *admissionregistrationv1.MutatingWebhookConfiguration + reconciledvwh *admissionregistrationv1.ValidatingWebhookConfiguration + }{ + // { + // "no existing and reconciled webhook configurations", + // nil, + // nil, + // nil, + // nil, + // }, + { + "no existing webhook configurations and create the reconciled webhook configurations", + nil, + nil, + testmwh, + testvwh, + }, + { + "existing webhook configurations and create the reconciled webhook configurations", + testmwh, + testvwh, + testmwh, + 
testvwh, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + objs := []runtime.Object{} + if c.existingmwh != nil { + objs = append(objs, c.existingmwh) + } + if c.existingvwh != nil { + objs = append(objs, c.existingvwh) + } + cl := fake.NewFakeClient(objs...) + wc := NewWebhookController(cl, c.reconciledmwh, c.reconciledvwh) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + wc.Start(ctx) + }() + time.Sleep(1 * time.Second) + cancel() + + if c.reconciledmwh != nil { + foundMwhc := &admissionregistrationv1.MutatingWebhookConfiguration{} + if err := cl.Get(context.TODO(), types.NamespacedName{Name: c.reconciledmwh.GetName()}, foundMwhc); err != nil { + t.Fatalf("failed to get the mutating webhook configuration: %v", err) + } + if !(foundMwhc.Webhooks[0].Name == c.reconciledmwh.Webhooks[0].Name && + reflect.DeepEqual(foundMwhc.Webhooks[0].AdmissionReviewVersions, c.reconciledmwh.Webhooks[0].AdmissionReviewVersions) && + reflect.DeepEqual(foundMwhc.Webhooks[0].Rules, c.reconciledmwh.Webhooks[0].Rules) && + reflect.DeepEqual(foundMwhc.Webhooks[0].ClientConfig.Service, c.reconciledmwh.Webhooks[0].ClientConfig.Service)) { + t.Errorf("Got differences between the found MutatingWebhookConfiguration and reconciled MutatingWebhookConfiguration:\nfound:%v\nreconciled:%v\n", foundMwhc, c.reconciledmwh) + } + } + + if c.reconciledvwh != nil { + foundVwhc := &admissionregistrationv1.ValidatingWebhookConfiguration{} + if err := cl.Get(context.TODO(), types.NamespacedName{Name: c.reconciledvwh.GetName()}, foundVwhc); err != nil { + t.Fatalf("failed to get the validating webhook configuration: %v", err) + } + if !(foundVwhc.Webhooks[0].Name == c.reconciledvwh.Webhooks[0].Name && + reflect.DeepEqual(foundVwhc.Webhooks[0].AdmissionReviewVersions, c.reconciledvwh.Webhooks[0].AdmissionReviewVersions) && + reflect.DeepEqual(foundVwhc.Webhooks[0].Rules, c.reconciledvwh.Webhooks[0].Rules) && + reflect.DeepEqual(foundVwhc.Webhooks[0].ClientConfig.Service, c.reconciledvwh.Webhooks[0].ClientConfig.Service)) { + t.Errorf("Got differences between the found ValidatingWebhookConfiguration and reconciled ValidatingWebhookConfiguration:\nfound:%v\nreconciled:%v\n", foundVwhc, c.reconciledvwh) + } + } + }) + } +} diff --git a/operators/multiclusterobservability/prestop.sh b/operators/multiclusterobservability/prestop.sh new file mode 100755 index 000000000..9d86f2b48 --- /dev/null +++ b/operators/multiclusterobservability/prestop.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright (c) 2021 Red Hat, Inc. 
+# Copyright Contributors to the Open Cluster Management project + +set -exo pipefail + +# Point to the internal API server hostname +APISERVER=https://kubernetes.default.svc + +# Path to ServiceAccount token +SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + +# Read this Pod's namespace +NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + +# Read the ServiceAccount bearer token +TOKEN=$(cat ${SERVICEACCOUNT}/token) + +# Reference the internal certificate authority (CA) +CACERT=${SERVICEACCOUNT}/ca.crt + +ValidatingWebhookConfigurationName=multicluster-observability-operator + +# Delete the validatingwebhookconfiguration with TOKEN +curl --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" -X DELETE ${APISERVER}/apis/admissionregistration.k8s.io/v1/validatingwebhookconfigurations/${ValidatingWebhookConfigurationName} + diff --git a/operators/pkg/config/config.go b/operators/pkg/config/config.go new file mode 100644 index 000000000..50445fbb3 --- /dev/null +++ b/operators/pkg/config/config.go @@ -0,0 +1,60 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package config + +const ( + ClusterNameKey = "cluster-name" + HubInfoSecretName = "hub-info-secret" + HubInfoSecretKey = "hub-info.yaml" // #nosec + ObservatoriumAPIRemoteWritePath = "/api/metrics/v1/default/api/v1/receive" + AnnotationSkipCreation = "skip-creation-if-exist" + + CollectorImage = "COLLECTOR_IMAGE" + InstallPrometheus = "INSTALL_PROM" + PullSecret = "PULL_SECRET" + ImageConfigMap = "images-list" + AllowlistConfigMapName = "observability-metrics-allowlist" +) + +const ( + MetricsCollectorImgName = "metrics-collector" + MetricsCollectorKey = "metrics_collector" + + PrometheusImgName = "prometheus" + PrometheusKey = "prometheus" + + KubeStateMetricsImgName = "kube-state-metrics" + KubeStateMetricsKey = "kube_state_metrics" + + NodeExporterImgName = "node-exporter" + NodeExporterKey = "node_exporter" + + KubeRbacProxyImgName = "kube-rbac-proxy" + KubeRbacProxyKey = "kube_rbac_proxy" + + ConfigmapReloaderImgName = "origin-configmap-reloader" + ConfigmapReloaderKey = "prometheus-config-reloader" +) + +var ( + ImageKeyNameMap = map[string]string{ + PrometheusKey: PrometheusKey, + KubeStateMetricsKey: KubeStateMetricsImgName, + NodeExporterKey: NodeExporterImgName, + KubeRbacProxyKey: KubeRbacProxyImgName, + MetricsCollectorKey: MetricsCollectorImgName, + ConfigmapReloaderKey: ConfigmapReloaderImgName, + } +) + +// HubInfo is the struct that contains the common information about the hub +// cluster, for example the name of managed cluster on the hub, the URL of +// observatorium api gateway, the URL of hub alertmanager and the CA for the +// hub router +type HubInfo struct { + ClusterName string `yaml:"cluster-name"` + ObservatoriumAPIEndpoint string `yaml:"observatorium-api-endpoint"` + AlertmanagerEndpoint string `yaml:"alertmanager-endpoint"` + AlertmanagerRouterCA string `yaml:"alertmanager-router-ca"` +} diff --git a/operators/pkg/deploying/deployer.go b/operators/pkg/deploying/deployer.go new file mode 100644 index 000000000..7dba6bd2a --- /dev/null +++ b/operators/pkg/deploying/deployer.go @@ -0,0 +1,245 @@ +// Copyright (c) 2021 Red Hat, Inc. 
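The HubInfo struct above defines the schema of the hub-info.yaml key in the hub-info-secret that the hub delivers to managed clusters. Below is a small decoding sketch, assuming gopkg.in/yaml.v2 (any YAML library that honours struct tags would do) and an inline document standing in for the secret payload; the endpoint values are examples.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	operatorconfig "github.com/stolostron/multicluster-observability-operator/operators/pkg/config"
)

func main() {
	// example payload; on a managed cluster this comes from the
	// hub-info-secret under the hub-info.yaml key
	payload := []byte(`
cluster-name: cluster1
observatorium-api-endpoint: https://observatorium-api.example.com/api/metrics/v1/default/api/v1/receive
alertmanager-endpoint: https://alertmanager.example.com
alertmanager-router-ca: ""
`)

	hubInfo := &operatorconfig.HubInfo{}
	if err := yaml.Unmarshal(payload, hubInfo); err != nil {
		fmt.Println("failed to decode hub info:", err)
		return
	}
	fmt.Println("forwarding metrics for", hubInfo.ClusterName, "to", hubInfo.ObservatoriumAPIEndpoint)
}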
+// Copyright Contributors to the Open Cluster Management project + +package deploying + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/stolostron/multicluster-observability-operator/operators/pkg/config" +) + +var log = logf.Log.WithName("deploying") + +type deployerFn func(*unstructured.Unstructured, *unstructured.Unstructured) error + +// Deployer is used create or update the resources +type Deployer struct { + client client.Client + deployerFns map[string]deployerFn +} + +// NewDeployer inits the deployer +func NewDeployer(client client.Client) *Deployer { + deployer := &Deployer{client: client} + deployer.deployerFns = map[string]deployerFn{ + "Deployment": deployer.updateDeployment, + "StatefulSet": deployer.updateStatefulSet, + "Service": deployer.updateService, + "ConfigMap": deployer.updateConfigMap, + "Secret": deployer.updateSecret, + "ClusterRole": deployer.updateClusterRole, + "ClusterRoleBinding": deployer.updateClusterRoleBinding, + } + return deployer +} + +// Deploy is used to create or update the resources +func (d *Deployer) Deploy(obj *unstructured.Unstructured) error { + found := &unstructured.Unstructured{} + found.SetGroupVersionKind(obj.GroupVersionKind()) + err := d.client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, found) + if err != nil { + if errors.IsNotFound(err) { + log.Info("Create", "Kind:", obj.GroupVersionKind(), "Name:", obj.GetName()) + return d.client.Create(context.TODO(), obj) + } + return err + } + + // if resource has annotation skip-creation-if-exist: true, don't update it to keep customized changes from users + metadata, ok := obj.Object["metadata"].(map[string]interface{}) + if ok { + annotations, ok := metadata["annotations"].(map[string]interface{}) + if ok && annotations != nil && annotations[config.AnnotationSkipCreation] != nil { + if strings.ToLower(annotations[config.AnnotationSkipCreation].(string)) == "true" { + log.Info("Skip creation", "Kind:", obj.GroupVersionKind(), "Name:", obj.GetName()) + return nil + } + } + } + + deployerFn, ok := d.deployerFns[found.GetKind()] + if ok { + return deployerFn(obj, found) + } + return nil +} + +func (d *Deployer) updateDeployment(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeDepoly := &appsv1.Deployment{} + err := json.Unmarshal(runtimeJSON, runtimeDepoly) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Deployment %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredDepoly := &appsv1.Deployment{} + err = json.Unmarshal(desiredJSON, desiredDepoly) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal Deployment %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredDepoly.Spec, runtimeDepoly.Spec) { + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + return d.client.Update(context.TODO(), desiredDepoly) + } + + return nil +} + +func (d *Deployer) updateStatefulSet(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := 
runtimeObj.MarshalJSON() + runtimeDepoly := &appsv1.StatefulSet{} + err := json.Unmarshal(runtimeJSON, runtimeDepoly) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime StatefulSet %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredDepoly := &appsv1.StatefulSet{} + err = json.Unmarshal(desiredJSON, desiredDepoly) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal StatefulSet %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredDepoly.Spec.Template, runtimeDepoly.Spec.Template) || + !apiequality.Semantic.DeepDerivative(desiredDepoly.Spec.Replicas, runtimeDepoly.Spec.Replicas) { + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + runtimeDepoly.Spec.Replicas = desiredDepoly.Spec.Replicas + runtimeDepoly.Spec.Template = desiredDepoly.Spec.Template + return d.client.Update(context.TODO(), runtimeDepoly) + } + + return nil +} + +func (d *Deployer) updateService(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeService := &corev1.Service{} + err := json.Unmarshal(runtimeJSON, runtimeService) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Service %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredService := &corev1.Service{} + err = json.Unmarshal(desiredJSON, desiredService) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal Service %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredService.Spec, runtimeService.Spec) { + desiredService.ObjectMeta.ResourceVersion = runtimeService.ObjectMeta.ResourceVersion + desiredService.Spec.ClusterIP = runtimeService.Spec.ClusterIP + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + return d.client.Update(context.TODO(), desiredService) + } + + return nil +} + +func (d *Deployer) updateConfigMap(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeConfigMap := &corev1.ConfigMap{} + err := json.Unmarshal(runtimeJSON, runtimeConfigMap) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime ConfigMap %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredConfigMap := &corev1.ConfigMap{} + err = json.Unmarshal(desiredJSON, desiredConfigMap) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal ConfigMap %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredConfigMap.Data, runtimeConfigMap.Data) { + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + return d.client.Update(context.TODO(), desiredConfigMap) + } + + return nil +} + +func (d *Deployer) updateSecret(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeSecret := &corev1.Secret{} + err := json.Unmarshal(runtimeJSON, runtimeSecret) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Secret %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredSecret := &corev1.Secret{} + err = json.Unmarshal(desiredJSON, desiredSecret) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal desired Secret %s", desiredObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredSecret.Data, runtimeSecret.Data) { + 
log.Info("Update", "Kind:", desiredObj.GroupVersionKind(), "Name:", desiredObj.GetName()) + return d.client.Update(context.TODO(), desiredSecret) + } + return nil +} + +func (d *Deployer) updateClusterRole(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeClusterRole := &rbacv1.ClusterRole{} + err := json.Unmarshal(runtimeJSON, runtimeClusterRole) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime ClusterRole %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredClusterRole := &rbacv1.ClusterRole{} + err = json.Unmarshal(desiredJSON, desiredClusterRole) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal desired ClusterRole %s", desiredObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredClusterRole.Rules, runtimeClusterRole.Rules) || + !apiequality.Semantic.DeepDerivative(desiredClusterRole.AggregationRule, runtimeClusterRole.AggregationRule) { + log.Info("Update", "Kind:", desiredObj.GroupVersionKind(), "Name:", desiredObj.GetName()) + return d.client.Update(context.TODO(), desiredClusterRole) + } + return nil +} + +func (d *Deployer) updateClusterRoleBinding(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeClusterRoleBinding := &rbacv1.ClusterRoleBinding{} + err := json.Unmarshal(runtimeJSON, runtimeClusterRoleBinding) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime ClusterRoleBinding %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredClusterRoleBinding := &rbacv1.ClusterRoleBinding{} + err = json.Unmarshal(desiredJSON, desiredClusterRoleBinding) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal desired ClusterRoleBinding %s", desiredObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredClusterRoleBinding.Subjects, runtimeClusterRoleBinding.Subjects) || + !apiequality.Semantic.DeepDerivative(desiredClusterRoleBinding.RoleRef, runtimeClusterRoleBinding.RoleRef) { + log.Info("Update", "Kind:", desiredObj.GroupVersionKind(), "Name:", desiredObj.GetName()) + return d.client.Update(context.TODO(), desiredClusterRoleBinding) + } + return nil +} diff --git a/operators/pkg/deploying/deployer_test.go b/operators/pkg/deploying/deployer_test.go new file mode 100644 index 000000000..415d4d46e --- /dev/null +++ b/operators/pkg/deploying/deployer_test.go @@ -0,0 +1,453 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package deploying + +import ( + "context" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + replicas1 int32 = 1 + replicas2 int32 = 2 +) + +func TestDeploy(t *testing.T) { + + cases := []struct { + name string + createObj runtime.Object + updateObj runtime.Object + validateResults func(client client.Client) + expectedErr string + }{ + { + name: "create and update the deployment", + createObj: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "ns1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas1, + }, + }, + updateObj: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "ns1", + ResourceVersion: "1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &(replicas2), + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-deployment", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + client.Get(context.Background(), namespacedName, obj) + + if *obj.Spec.Replicas != 2 { + t.Fatalf("fail to update the deployment") + } + }, + }, + { + name: "create and no update the deployment", + createObj: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-2", + Namespace: "ns1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas1, + }, + }, + updateObj: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-2", + Namespace: "ns1", + Labels: map[string]string{ + "test-label": "label-value", + }, + ResourceVersion: "1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas1, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-deployment-2", + Namespace: "ns1", + } + obj := &appsv1.Deployment{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.ObjectMeta.GetLabels()) != 0 { + t.Fatalf("should not update the deployment") + } + }, + }, + { + name: "create and update the statefulset", + createObj: &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulSet", + Namespace: "ns1", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas1, + }, + }, + updateObj: &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulSet", + Namespace: "ns1", + ResourceVersion: "1", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &(replicas2), + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-statefulSet", + Namespace: "ns1", + } + obj := &appsv1.StatefulSet{} + client.Get(context.Background(), namespacedName, obj) + + if *obj.Spec.Replicas != 2 { + 
t.Fatalf("fail to update the statefulset") + } + }, + }, + { + name: "create and update the configmap", + createObj: &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + }, + Data: map[string]string{ + "test-key": "test-value-1", + }, + }, + updateObj: &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "ns2", + ResourceVersion: "1", + }, + Data: map[string]string{ + "test-key": "test-value-2", + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-cm", + Namespace: "ns2", + } + obj := &corev1.ConfigMap{} + client.Get(context.Background(), namespacedName, obj) + + if obj.Data["test-key"] != "test-value-2" { + t.Fatalf("fail to update the configmap") + } + }, + }, + { + name: "create and update the service", + createObj: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-svc", + Namespace: "ns2", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "myApp-1", + }, + }, + }, + updateObj: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-svc", + Namespace: "ns2", + ResourceVersion: "1", + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + "app": "myApp-2", + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-svc", + Namespace: "ns2", + } + obj := &corev1.Service{} + client.Get(context.Background(), namespacedName, obj) + + if obj.Spec.Selector["app"] != "myApp-2" { + t.Fatalf("fail to update the service") + } + }, + }, + { + name: "create and update the secret", + createObj: &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "ns2", + }, + }, + updateObj: &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret", + Namespace: "ns2", + ResourceVersion: "1", + }, + Data: map[string][]byte{ + "username": []byte("YWRtaW4="), + "password": []byte("MWYyZDFlMmU2N2Rm"), + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-secret", + Namespace: "ns2", + } + obj := &corev1.Secret{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.Data) == 0 { + t.Fatalf("fail to update the secret") + } + }, + }, + { + name: "create and update the clusterrole", + createObj: &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrole", + }, + }, + updateObj: &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrole", + ResourceVersion: "1", + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + "pods", + }, + Verbs: []string{ + "watch", + "list", + "get", + }, + APIGroups: []string{ + "", + }, + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-clusterrole", + } + obj := &rbacv1.ClusterRole{} 
+ client.Get(context.Background(), namespacedName, obj) + + if len(obj.Rules) == 0 { + t.Fatalf("fail to update the clusterrole") + } + }, + }, + { + name: "create and update the clusterrolebinding", + createObj: &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrolebinding", + }, + }, + updateObj: &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrolebinding", + ResourceVersion: "1", + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: "test-clusterrole", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "ns2", + }, + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-clusterrolebinding", + } + obj := &rbacv1.ClusterRoleBinding{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.Subjects) == 0 { + t.Fatalf("fail to update the clusterrolebinding") + } + }, + }, + { + name: "serviceaccount update is not supported", + createObj: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "ns1", + }, + }, + updateObj: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "ns1", + Labels: map[string]string{ + "test-label": "label-value", + }, + ResourceVersion: "1", + }, + }, + validateResults: func(client client.Client) { + namespacedName := types.NamespacedName{ + Name: "test-sa", + Namespace: "ns1", + } + obj := &corev1.ServiceAccount{} + client.Get(context.Background(), namespacedName, obj) + + if len(obj.GetObjectMeta().GetLabels()) != 0 { + t.Fatalf("update serviceaccount is not supported then") + } + }, + }, + } + + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + appsv1.AddToScheme(scheme) + rbacv1.AddToScheme(scheme) + client := fake.NewFakeClientWithScheme(scheme, []runtime.Object{}...) + + deployer := NewDeployer(client) + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + createObjUns, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(c.createObj) + err := deployer.Deploy(&unstructured.Unstructured{Object: createObjUns}) + if err != nil { + t.Fatalf("Cannot create the deployment %v", err) + } + if c.updateObj != nil { + updateObjUns, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(c.updateObj) + err = deployer.Deploy(&unstructured.Unstructured{Object: updateObjUns}) + if err != nil { + t.Fatalf("Cannot update the deployment %v", err) + } + } + + c.validateResults(client) + }) + } + +} diff --git a/operators/pkg/rendering/patching/patcher.go b/operators/pkg/rendering/patching/patcher.go new file mode 100644 index 000000000..20baea81a --- /dev/null +++ b/operators/pkg/rendering/patching/patcher.go @@ -0,0 +1,312 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package patching + +import ( + "fmt" + "sort" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/kustomize/v3/k8sdeps/kunstruct" + "sigs.k8s.io/kustomize/v3/pkg/ifc" + "sigs.k8s.io/kustomize/v3/pkg/resource" + "sigs.k8s.io/yaml" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +const ( + specFirstContainer = "spec.template.spec.containers[0]" +) + +type patchGenerateFn func( + res *resource.Resource, + mco *mcov1beta2.MultiClusterObservability) (ifc.Kunstructured, error) + +func ApplyGlobalPatches(res *resource.Resource, mco *mcov1beta2.MultiClusterObservability) error { + + // for _, generate := range []patchGenerateFn{ + // //generateImagePatch, + // //generateImagePullSecretsPatch, + // generateNodeSelectorPatch, + // } { + // patch, err := generate(res, mco) + // if err != nil { + // return err + // } + // if patch == nil { + // continue + // } + // if err = res.Patch(patch); err != nil { + // return err + // } + // } + return nil +} + +func generateImagePatch( + res *resource.Resource, + mco *mcov1beta2.MultiClusterObservability) (ifc.Kunstructured, error) { + imageFromTemplate, err := res.GetString(specFirstContainer + ".image") // need to loop through all images + if err != nil { + return nil, err + } + imageRepo := util.GetAnnotation(mco.GetAnnotations(), mcoconfig.AnnotationKeyImageRepository) + imageTagSuffix := util.GetAnnotation(mco.GetAnnotations(), mcoconfig.AnnotationKeyImageTagSuffix) + if imageTagSuffix != "" { + imageTagSuffix = "-" + imageTagSuffix + } + generatedImage := fmt.Sprintf("%s/%s%s", imageRepo, imageFromTemplate, imageTagSuffix) + + container, _ := res.GetFieldValue(specFirstContainer) + containerMap, _ := container.(map[string]interface{}) + containerMap["image"] = generatedImage + containerMap["imagePullPolicy"] = mcoconfig.GetImagePullPolicy(mco.Spec) + + return newKunstructuredForSpecContainers(containerMap), nil +} + +/* #nosec */ +const imagePullSecretsTemplate = ` +kind: __kind__ +spec: + template: + spec: + imagePullSecrets: + - name: __pullsecrets__ +` + +func generateImagePullSecretsPatch( + res *resource.Resource, + mco *mcov1beta2.MultiClusterObservability) (ifc.Kunstructured, error) { + + pullSecret := mcoconfig.GetImagePullSecret(mco.Spec) + + template := strings.Replace(imagePullSecretsTemplate, "__kind__", res.GetKind(), 1) + template = strings.Replace(template, "__pullsecrets__", pullSecret, 1) + json, err := yaml.YAMLToJSON([]byte(template)) + if err != nil { + return nil, err + } + var u unstructured.Unstructured + err = u.UnmarshalJSON(json) + return &kunstruct.UnstructAdapter{Unstructured: u}, err +} + +// const nodeSelectorTemplate = ` +// kind: __kind__ +// spec: +// template: +// spec: +// nodeSelector: {__selector__} +// ` + +// func generateNodeSelectorPatch( +// res *resource.Resource, +// mco *mcov1beta2.MultiClusterObservability) (ifc.Kunstructured, error) { + +// nodeSelectorOptions := mco.Spec.NodeSelector +// if nodeSelectorOptions == nil { +// return nil, nil +// } +// template := strings.Replace(nodeSelectorTemplate, "__kind__", res.GetKind(), 1) +// selectormap := map[string]string{} +// if nodeSelectorOptions.OS != "" { 
+// selectormap["beta.kubernetes.io/os"] = nodeSelectorOptions.OS +// } +// if nodeSelectorOptions.CustomLabelSelector != "" && nodeSelectorOptions.CustomLabelValue != "" { +// selectormap[nodeSelectorOptions.CustomLabelSelector] = nodeSelectorOptions.CustomLabelValue +// } +// if len(selectormap) == 0 { +// return nil, nil +// } +// selectors := []string{} +// for k, v := range selectormap { +// selectors = append(selectors, fmt.Sprintf("\"%s\":\"%s\"", k, v)) +// } +// template = strings.Replace(template, "__selector__", strings.Join(selectors, ","), 1) +// json, err := yaml.YAMLToJSON([]byte(template)) +// if err != nil { +// return nil, err +// } +// var u unstructured.Unstructured +// err = u.UnmarshalJSON(json) +// return &kunstruct.UnstructAdapter{Unstructured: u}, err +// } + +func generateReplicasPatch(replicas int32) ifc.Kunstructured { + return kunstruct.NewKunstructuredFactoryImpl().FromMap(map[string]interface{}{ + "spec": map[string]interface{}{ + "replicas": replicas, + }, + }) +} + +func generateContainerArgsPatch(r *resource.Resource, newArgs map[string]string) (ifc.Kunstructured, error) { + originalArgs, err := r.Kunstructured.GetStringSlice(specFirstContainer + ".args") + if err != nil { + return nil, err + } + + cmd, originalArgs := splitArgs(originalArgs) + + argsMap := toArgsMap(originalArgs) + + for newkey, newval := range newArgs { + argsMap[fmt.Sprintf("--%s", newkey)] = newval + } + + args := []string{} + for k, v := range argsMap { + arg := fmt.Sprintf("%s=%s", k, v) + if v == "" { + arg = k + } + args = append(args, arg) + } + sort.Strings(args) + if cmd != "" { + args = append([]string{cmd}, args...) + } + + container, _ := r.GetFieldValue(specFirstContainer) + containerMap, _ := container.(map[string]interface{}) + containerMap["args"] = args + + return newKunstructuredForSpecContainers(containerMap), nil +} + +func generateEnvVarsPatch(r *resource.Resource, newEnvs []corev1.EnvVar) (ifc.Kunstructured, error) { + origianl, err := r.GetSlice(specFirstContainer + ".env") + if err != nil { + return nil, err + } + + envMap := toNamedObjsMap(origianl) + for _, newEnv := range newEnvs { + envMap[newEnv.Name] = newEnv + } + + envs := []interface{}{} + for _, envName := range getSortedKeys(envMap) { + envs = append(envs, envMap[envName]) + } + + container, _ := r.GetFieldValue(specFirstContainer) + containerMap, _ := container.(map[string]interface{}) + containerMap["env"] = envs + + return newKunstructuredForSpecContainers(containerMap), nil +} + +func generateVolumesPatch(r *resource.Resource, newVolumes []corev1.Volume) (ifc.Kunstructured, error) { + origianl, err := r.GetSlice("spec.template.spec.volumes") + if err != nil { + return nil, err + } + + volumesMap := toNamedObjsMap(origianl) + for _, newVolume := range newVolumes { + volumesMap[newVolume.Name] = newVolume + } + + volumes := []interface{}{} + for _, volumeName := range getSortedKeys(volumesMap) { + volumes = append(volumes, volumesMap[volumeName]) + } + + return kunstruct.NewKunstructuredFactoryImpl().FromMap(map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "volumes": volumes, + }, + }, + }, + }), nil +} + +func generateVolumeMountPatch(r *resource.Resource, newVolumeMounts []corev1.VolumeMount) (ifc.Kunstructured, error) { + origianl, err := r.GetSlice(specFirstContainer + ".volumeMounts") + if err != nil { + return nil, err + } + volumeMountMap := toNamedObjsMap(origianl) + for _, newVolumeMount := range 
newVolumeMounts { + volumeMountMap[newVolumeMount.Name] = newVolumeMount + } + + envs := []interface{}{} + for _, envName := range getSortedKeys(volumeMountMap) { + envs = append(envs, volumeMountMap[envName]) + } + + container, _ := r.GetFieldValue(specFirstContainer) + containerMap, _ := container.(map[string]interface{}) + containerMap["volumeMounts"] = envs + + return newKunstructuredForSpecContainers(containerMap), nil +} + +func splitArgs(args []string) (string, []string) { + cmd := args[0] + if !strings.HasPrefix(cmd, "--") { + return cmd, args[1:] + } + return "", args +} + +func toArgsMap(args []string) map[string]string { + argsmap := map[string]string{} + for _, arg := range args { + index := strings.Index(arg, "=") + if index == -1 { + argsmap[arg] = "" + continue + } + argsmap[arg[0:strings.Index(arg, "=")]] = arg[strings.Index(arg, "=")+1:] + } + return argsmap +} + +func toNamedObjsMap(objs []interface{}) map[string]interface{} { + objsMap := map[string]interface{}{} + for _, obj := range objs { + objmap, ok := obj.(map[string]interface{}) + if !ok { + continue + } + name, ok := objmap["name"] + if !ok { + continue + } + objsMap[fmt.Sprintf("%s", name)] = obj + } + return objsMap +} + +func getSortedKeys(objMap map[string]interface{}) []string { + keys := []string{} + for k := range objMap { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func newKunstructuredForSpecContainers(srcMap map[string]interface{}) ifc.Kunstructured { + return kunstruct.NewKunstructuredFactoryImpl().FromMap(map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []interface{}{srcMap}, + }, + }, + }, + }) +} diff --git a/operators/pkg/rendering/patching/patcher_test.go b/operators/pkg/rendering/patching/patcher_test.go new file mode 100644 index 000000000..b23a441d0 --- /dev/null +++ b/operators/pkg/rendering/patching/patcher_test.go @@ -0,0 +1,71 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package patching + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/kustomize/v3/k8sdeps/kunstruct" + "sigs.k8s.io/kustomize/v3/pkg/resource" + "sigs.k8s.io/yaml" + + mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" +) + +var apiserver = ` +kind: Deployment +apiVersion: apps/v1 +metadata: + name: mcm-apiserver + labels: + app: "mcm-apiserver" +spec: + template: + spec: + volumes: + - name: apiserver-cert + secret: + secretName: "test" + containers: + - name: mcm-apiserver + image: "mcm-api" + env: + - name: MYHUBNAME + value: test + volumeMounts: [] + args: + - "/mcm-apiserver" + - "--enable-admission-plugins=HCMUserIdentity,KlusterletCA,NamespaceLifecycle" +` + +var factory = resource.NewFactory(kunstruct.NewKunstructuredFactoryImpl()) + +func TestApplyGlobalPatches(t *testing.T) { + json, err := yaml.YAMLToJSON([]byte(apiserver)) + if err != nil { + t.Fatalf("failed to apply global patches %v", err) + } + var u unstructured.Unstructured + u.UnmarshalJSON(json) + apiserver := factory.FromMap(u.Object) + + mchcr := &mcov1beta2.MultiClusterObservability{ + TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Annotations: map[string]string{"mco-imageRepository": "quay.io/stolostron"}, + }, + Spec: mcov1beta2.MultiClusterObservabilitySpec{ + ImagePullPolicy: "Always", + ImagePullSecret: "test", + }, + } + + err = ApplyGlobalPatches(apiserver, mchcr) + if err != nil { + t.Fatalf("failed to apply global patches %v", err) + } +} diff --git a/operators/pkg/rendering/renderer.go b/operators/pkg/rendering/renderer.go new file mode 100644 index 000000000..8421734e4 --- /dev/null +++ b/operators/pkg/rendering/renderer.go @@ -0,0 +1,140 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package rendering + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/kustomize/v3/pkg/resource" +) + +const ( + metadataErr = "failed to find metadata field" + nsUpdateAnnoKey = "update-namespace" +) + +type RenderFn func(*resource.Resource, string, map[string]string) (*unstructured.Unstructured, error) + +type Renderer struct { + renderFns map[string]RenderFn +} + +func NewRenderer() *Renderer { + renderer := &Renderer{} + renderer.renderFns = map[string]RenderFn{ + "Deployment": renderer.RenderDeployments, + "StatefulSet": renderer.RenderNamespace, + "DaemonSet": renderer.RenderNamespace, + "Service": renderer.RenderNamespace, + "ServiceAccount": renderer.RenderNamespace, + "ConfigMap": renderer.RenderNamespace, + "ClusterRole": renderer.RenderClusterRole, + "ClusterRoleBinding": renderer.RenderClusterRoleBinding, + "Secret": renderer.RenderNamespace, + "Role": renderer.RenderNamespace, + "RoleBinding": renderer.RenderNamespace, + "Ingress": renderer.RenderNamespace, + "PersistentVolumeClaim": renderer.RenderNamespace, + } + return renderer +} + +func (r *Renderer) RenderTemplates(templates []*resource.Resource, namespace string, labels map[string]string) ([]*unstructured.Unstructured, error) { + uobjs := []*unstructured.Unstructured{} + for _, template := range templates { + render, ok := r.renderFns[template.GetKind()] + if !ok { + uobjs = append(uobjs, &unstructured.Unstructured{Object: template.Map()}) + continue + } + uobj, err := render(template.DeepCopy(), namespace, labels) + if err != nil { + return []*unstructured.Unstructured{}, err + } + if uobj == nil { + continue + } + uobjs = append(uobjs, uobj) + + } + + return uobjs, nil +} + +func (r *Renderer) RenderDeployments(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + /* err := patching.ApplyGlobalPatches(res, r.cr) + if err != nil { + return nil, err + } */ + + res.SetNamespace(namespace) + u := &unstructured.Unstructured{Object: res.Map()} + return u, nil +} + +func (r *Renderer) RenderNamespace(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u := &unstructured.Unstructured{Object: res.Map()} + if UpdateNamespace(u) { + res.SetNamespace(namespace) + } + + return u, nil +} + +func (r *Renderer) RenderClusterRole(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u := &unstructured.Unstructured{Object: res.Map()} + + cLabels := u.GetLabels() + if cLabels == nil { + cLabels = make(map[string]string) + } + for k, v := range labels { + cLabels[k] = v + } + u.SetLabels(cLabels) + + return u, nil +} + +func (r *Renderer) RenderClusterRoleBinding(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { + u := &unstructured.Unstructured{Object: res.Map()} + + cLabels := u.GetLabels() + if cLabels == nil { + cLabels = make(map[string]string) + } + for k, v := range labels { + cLabels[k] = v + } + u.SetLabels(cLabels) + + subjects, ok := u.Object["subjects"].([]interface{}) + if !ok { + return nil, fmt.Errorf("failed to find clusterrolebinding subjects field") + } + subject := subjects[0].(map[string]interface{}) + kind := subject["kind"] + if kind == "Group" { + return u, nil + } + + if UpdateNamespace(u) { + subject["namespace"] = namespace + } + + return u, nil +} + +// UpdateNamespace checks 
for annotiation to update NS +func UpdateNamespace(u *unstructured.Unstructured) bool { + annotations := u.GetAnnotations() + v, ok := annotations[nsUpdateAnnoKey] + if !ok { + return true + } + ret, _ := strconv.ParseBool(v) + return ret +} diff --git a/operators/pkg/rendering/templates/templates.go b/operators/pkg/rendering/templates/templates.go new file mode 100644 index 000000000..130c1f193 --- /dev/null +++ b/operators/pkg/rendering/templates/templates.go @@ -0,0 +1,101 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package templates + +import ( + "log" + "os" + "sync" + + "sigs.k8s.io/kustomize/v3/k8sdeps/kunstruct" + "sigs.k8s.io/kustomize/v3/k8sdeps/transformer" + "sigs.k8s.io/kustomize/v3/k8sdeps/validator" + "sigs.k8s.io/kustomize/v3/pkg/fs" + "sigs.k8s.io/kustomize/v3/pkg/loader" + "sigs.k8s.io/kustomize/v3/pkg/plugins" + "sigs.k8s.io/kustomize/v3/pkg/resmap" + "sigs.k8s.io/kustomize/v3/pkg/resource" + "sigs.k8s.io/kustomize/v3/pkg/target" +) + +const ( + TemplatesPathEnvVar = "TEMPLATES_PATH" +) + +var ( + loadTemplateRendererOnce sync.Once + templateRenderer *TemplateRenderer + templatesPath = "/usr/local/manifests" +) + +func GetTemplateRenderer() *TemplateRenderer { + loadTemplateRendererOnce.Do(func() { + templatesPathInEnv, found := os.LookupEnv(TemplatesPathEnvVar) + if found { + templatesPath = templatesPathInEnv + } + templateRenderer = &TemplateRenderer{ + templatesPath: templatesPath, + templates: map[string]resmap.ResMap{}, + } + }) + return templateRenderer +} + +type TemplateRenderer struct { + templatesPath string + templates map[string]resmap.ResMap +} + +func NewTemplateRenderer(path string) *TemplateRenderer { + return &TemplateRenderer{ + templatesPath: path, + templates: map[string]resmap.ResMap{}, + } +} + +func (r *TemplateRenderer) GetTemplatesPath() string { + return r.templatesPath +} + +func (r *TemplateRenderer) AddTemplateFromPath(kustomizationPath string, resourceList *[]*resource.Resource) error { + var err error + resMap, ok := r.templates[kustomizationPath] + if !ok { + resMap, err = r.render(kustomizationPath) + if err != nil { + log.Printf("Cannot find this path %v, %v", kustomizationPath, err) + return nil + } + r.templates[kustomizationPath] = resMap + } + *resourceList = append(*resourceList, resMap.Resources()...) + return nil +} + +func (r *TemplateRenderer) render(kustomizationPath string) (resmap.ResMap, error) { + ldr, err := loader.NewLoader( + loader.RestrictionRootOnly, + validator.NewKustValidator(), + kustomizationPath, + fs.MakeFsOnDisk(), + ) + + if err != nil { + return nil, err + } + defer func() { + if err := ldr.Cleanup(); err != nil { + log.Printf("failed to clean up loader, %v", err) + } + }() + pf := transformer.NewFactoryImpl() + rf := resmap.NewFactory(resource.NewFactory(kunstruct.NewKunstructuredFactoryImpl()), pf) + pl := plugins.NewLoader(plugins.DefaultPluginConfig(), rf) + kt, err := target.NewKustTarget(ldr, rf, pf, pl) + if err != nil { + return nil, err + } + return kt.MakeCustomizedResMap() +} diff --git a/operators/pkg/util/obj_compare.go b/operators/pkg/util/obj_compare.go new file mode 100644 index 000000000..4ac3c79ce --- /dev/null +++ b/operators/pkg/util/obj_compare.go @@ -0,0 +1,223 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project
+
+package util
+
+import (
+    "reflect"
+    "strings"
+
+    v1 "k8s.io/api/apps/v1"
+    corev1 "k8s.io/api/core/v1"
+    rbacv1 "k8s.io/api/rbac/v1"
+    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/yaml"
+    logf "sigs.k8s.io/controller-runtime/pkg/log"
+
+    mcov1beta1 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta1"
+)
+
+type compFn func(runtime.Object, runtime.Object) bool
+
+var log = logf.Log.WithName("obj_compare")
+
+var compFns = map[string]compFn{
+    "Namespace":                compareNamespaces,
+    "Deployment":               compareDeployments,
+    "ServiceAccount":           compareServiceAccounts,
+    "ClusterRole":              compareClusterRoles,
+    "ClusterRoleBinding":       compareClusterRoleBindings,
+    "Secret":                   compareSecrets,
+    "Service":                  compareServices,
+    "ConfigMap":                compareConfigMap,
+    "CustomResourceDefinition": compareCRD,
+    "ObservabilityAddon":       compareObsAddon,
+}
+
+// GetK8sObj is used to get the k8s struct based on the passed-in Kind name
+func GetK8sObj(kind string) runtime.Object {
+    objs := map[string]runtime.Object{
+        "Namespace":                &corev1.Namespace{},
+        "Deployment":               &v1.Deployment{},
+        "StatefulSet":              &v1.StatefulSet{},
+        "DaemonSet":                &v1.DaemonSet{},
+        "ClusterRole":              &rbacv1.ClusterRole{},
+        "ClusterRoleBinding":       &rbacv1.ClusterRoleBinding{},
+        "ServiceAccount":           &corev1.ServiceAccount{},
+        "PersistentVolumeClaim":    &corev1.PersistentVolumeClaim{},
+        "Secret":                   &corev1.Secret{},
+        "ConfigMap":                &corev1.ConfigMap{},
+        "Service":                  &corev1.Service{},
+        "CustomResourceDefinition": &apiextensionsv1.CustomResourceDefinition{},
+        "ObservabilityAddon":       &mcov1beta1.ObservabilityAddon{},
+    }
+    return objs[kind]
+}
+
+// CompareObject is used to check whether two k8s objects are the same or not
+func CompareObject(re1 runtime.RawExtension, re2 runtime.RawExtension) bool {
+    if re2.Object == nil {
+        return reflect.DeepEqual(re1.Raw, re2.Raw)
+    }
+    obj1, err := GetObject(re1)
+    if err != nil {
+        return false
+    }
+    obj2, err := GetObject(re2)
+    if err != nil {
+        return false
+    }
+    kind1 := obj1.GetObjectKind().GroupVersionKind().Kind
+    kind2 := obj2.GetObjectKind().GroupVersionKind().Kind
+    if kind1 != kind2 {
+        log.Info("obj1 and obj2 have different Kind", "kind1", kind1, "kind2", kind2)
+        return false
+    }
+    return compFns[kind1](obj1, obj2)
+}
+
+func GetObject(re runtime.RawExtension) (runtime.Object, error) {
+    if re.Object != nil {
+        return re.Object, nil
+    }
+    _, gvk, err := unstructured.UnstructuredJSONScheme.Decode(re.Raw, nil, re.Object)
+    if err != nil {
+        log.Error(err, "Failed to decode the raw")
+        return nil, err
+    }
+    obj := GetK8sObj(gvk.Kind)
+    err = yaml.NewYAMLOrJSONDecoder(strings.NewReader(string(re.Raw)), 100).Decode(obj)
+    if err != nil {
+        log.Error(err, "Failed to decode the raw to Kind", "kind", gvk.Kind)
+        return nil, err
+    }
+    return obj, nil
+}
+
+func compareNamespaces(obj1 runtime.Object, obj2 runtime.Object) bool {
+    ns1 := obj1.(*corev1.Namespace)
+    ns2 := obj2.(*corev1.Namespace)
+    if ns1.Name != ns2.Name {
+        log.Info("Find updated namespace in manifestwork", "namespace1", ns1, "namespace2", ns2)
+        return false
+    }
+    return true
+}
+
+func compareDeployments(obj1 runtime.Object, obj2 runtime.Object) bool {
+    dep1 := obj1.(*v1.Deployment)
+    dep2 := obj2.(*v1.Deployment)
+    if dep1.Name != dep2.Name || dep1.Namespace != dep2.Namespace {
+        log.Info("Find updated name/namespace for deployment", "deployment", dep1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(dep1.Spec, dep2.Spec) {
+        log.Info("Find updated deployment", "deployment", dep1.Name)
+        return false
+    }
+    return true
+}
+
+func compareServiceAccounts(obj1 runtime.Object, obj2 runtime.Object) bool {
+    sa1 := obj1.(*corev1.ServiceAccount)
+    sa2 := obj2.(*corev1.ServiceAccount)
+    if sa1.Name != sa2.Name || sa1.Namespace != sa2.Namespace {
+        log.Info("Find updated name/namespace for serviceaccount", "serviceaccount", sa1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(sa1.ImagePullSecrets, sa2.ImagePullSecrets) {
+        log.Info("Find updated imagepullsecrets in serviceaccount", "serviceaccount", sa1.Name)
+        return false
+    }
+    return true
+}
+
+func compareClusterRoles(obj1 runtime.Object, obj2 runtime.Object) bool {
+    cr1 := obj1.(*rbacv1.ClusterRole)
+    cr2 := obj2.(*rbacv1.ClusterRole)
+    if cr1.Name != cr2.Name {
+        log.Info("Find updated name for clusterrole", "clusterrole", cr1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(cr1.Rules, cr2.Rules) {
+        log.Info("Find updated rules in clusterrole", "clusterrole", cr1.Name)
+        return false
+    }
+    return true
+}
+
+func compareClusterRoleBindings(obj1 runtime.Object, obj2 runtime.Object) bool {
+    crb1 := obj1.(*rbacv1.ClusterRoleBinding)
+    crb2 := obj2.(*rbacv1.ClusterRoleBinding)
+    if crb1.Name != crb2.Name {
+        log.Info("Find updated name/namespace for clusterrolebinding", "clusterrolebinding", crb1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(crb1.Subjects, crb2.Subjects) || !reflect.DeepEqual(crb1.RoleRef, crb2.RoleRef) {
+        log.Info("Find updated subjects/rolerefs for clusterrolebinding", "clusterrolebinding", crb1.Name)
+        return false
+    }
+    return true
+}
+
+func compareSecrets(obj1 runtime.Object, obj2 runtime.Object) bool {
+    s1 := obj1.(*corev1.Secret)
+    s2 := obj2.(*corev1.Secret)
+    if s1.Name != s2.Name || s1.Namespace != s2.Namespace {
+        log.Info("Find updated name/namespace for secret", "secret", s1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(s1.Data, s2.Data) {
+        log.Info("Find updated data in secret", "secret", s1.Name)
+        return false
+    }
+    return true
+}
+
+func compareServices(obj1 runtime.Object, obj2 runtime.Object) bool {
+    s1 := obj1.(*corev1.Service)
+    s2 := obj2.(*corev1.Service)
+    if s1.Name != s2.Name || s1.Namespace != s2.Namespace {
+        log.Info("Find updated name/namespace for service", "service", s1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(s1.Spec, s2.Spec) {
+        log.Info("Find updated spec in service", "service", s1.Name)
+        return false
+    }
+    return true
+}
+
+func compareConfigMap(obj1 runtime.Object, obj2 runtime.Object) bool {
+    cm1 := obj1.(*corev1.ConfigMap)
+    cm2 := obj2.(*corev1.ConfigMap)
+    if cm1.Name != cm2.Name || cm1.Namespace != cm2.Namespace {
+        log.Info("Find updated name/namespace for configmap", "configmap", cm1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(cm1.Data, cm2.Data) {
+        log.Info("Find updated data in configmap", "configmap", cm1.Name)
+        return false
+    }
+    return true
+}
+
+func compareCRD(obj1 runtime.Object, obj2 runtime.Object) bool {
+    crd1 := obj1.(*apiextensionsv1.CustomResourceDefinition)
+    crd2 := obj2.(*apiextensionsv1.CustomResourceDefinition)
+    if crd1.Name != crd2.Name {
+        log.Info("Find updated name for crd", "crd", crd1.Name)
+        return false
+    }
+    if !reflect.DeepEqual(crd1.Spec, crd2.Spec) {
+        log.Info("Find updated spec for crd", "crd", crd1.Name)
+        return false
+    }
+    return true
+}
+
+func compareObsAddon(obj1 runtime.Object, obj2 runtime.Object) bool {
+    return 
reflect.DeepEqual(obj1, obj2) +} diff --git a/operators/pkg/util/obj_compare_test.go b/operators/pkg/util/obj_compare_test.go new file mode 100644 index 000000000..01e82e87e --- /dev/null +++ b/operators/pkg/util/obj_compare_test.go @@ -0,0 +1,330 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + replicas1 int32 = 1 + replicas2 int32 = 2 +) + +func TestCompareObject(t *testing.T) { + + cases := []struct { + name string + rawObj1 runtime.RawExtension + rawObj2 runtime.RawExtension + rawObj3 runtime.RawExtension + validateResults func(re1, re2 runtime.RawExtension) + }{ + { + name: "Compare namespaces", + rawObj1: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "test-ns-1" + } +}`), + }, + rawObj2: runtime.RawExtension{ + Object: &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ns-2", + }, + Spec: corev1.NamespaceSpec{}, + }, + }, + }, + { + name: "Compare serviceaccount", + rawObj1: runtime.RawExtension{ + Object: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa-1", + Namespace: "ns1", + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: "test-image-pull-secret-1", + }, + }, + }, + }, + rawObj2: runtime.RawExtension{ + Object: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa-2", + Namespace: "ns1", + }, + }, + }, + rawObj3: runtime.RawExtension{ + Object: &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa-1", + Namespace: "ns1", + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: "test-image-pull-secret-3", + }, + }, + }, + }, + }, + { + name: "Compare ClusterRoleBinding", + rawObj1: runtime.RawExtension{ + Object: &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrolebinding-1", + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: "test-clusterrole-1", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "ns2", + }, + }, + }, + }, + rawObj2: runtime.RawExtension{ + Object: &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrolebinding-2", + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: "test-clusterrole-2", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "ns2", + }, + }, + }, + }, + rawObj3: runtime.RawExtension{ + Object: &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrolebinding-1", + }, + RoleRef: rbacv1.RoleRef{ + 
Kind: "Role", + Name: "test-clusterrole-2", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "test-sa", + Namespace: "ns2", + }, + }, + }, + }, + }, + { + name: "Compare ClusterRole", + rawObj1: runtime.RawExtension{ + Object: &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrole-1", + }, + }, + }, + rawObj2: runtime.RawExtension{ + Object: &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrole-2", + }, + }, + }, + rawObj3: runtime.RawExtension{ + Object: &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-clusterrole-1", + }, + Rules: []rbacv1.PolicyRule{ + { + Resources: []string{ + "pods", + }, + Verbs: []string{ + "watch", + }, + APIGroups: []string{ + "", + }, + }, + }, + }, + }, + }, + { + name: "Compare Deployment", + rawObj1: runtime.RawExtension{ + Object: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-1", + Namespace: "ns1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas1, + }, + }, + }, + rawObj2: runtime.RawExtension{ + Object: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-2", + Namespace: "ns1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas1, + }, + }, + }, + rawObj3: runtime.RawExtension{ + Object: &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-1", + Namespace: "ns1", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas2, + }, + }, + }, + }, + { + name: "Compare Secret", + rawObj1: runtime.RawExtension{ + Object: &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret-1", + Namespace: "ns2", + }, + }, + }, + rawObj2: runtime.RawExtension{ + Object: &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret-2", + Namespace: "ns2", + }, + }, + }, + rawObj3: runtime.RawExtension{ + Object: &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-secret-1", + Namespace: "ns2", + }, + Data: map[string][]byte{ + "username": []byte("YWRtaW4="), + "password": []byte("MWYyZDFlMmU2N2Rm"), + }, + }, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + if CompareObject(c.rawObj1, c.rawObj2) { + t.Errorf("The two objects are same. Actually they should be different.") + } + if !CompareObject(c.rawObj1, c.rawObj1) { + t.Errorf("The same object should be no difference.") + } + if CompareObject(c.rawObj1, c.rawObj3) { + t.Errorf("The object may not be updated.") + } + }) + } + +} diff --git a/operators/pkg/util/util.go b/operators/pkg/util/util.go new file mode 100644 index 000000000..0c070b170 --- /dev/null +++ b/operators/pkg/util/util.go @@ -0,0 +1,113 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project
+
+package util
+
+import (
+    "crypto/rand"
+    "encoding/base64"
+    "net/http"
+    "net/http/pprof"
+    "os"
+)
+
+// Remove is used to remove a string from a string slice
+func Remove(list []string, s string) []string {
+    result := []string{}
+    for _, v := range list {
+        if v != s {
+            result = append(result, v)
+        }
+    }
+    return result
+}
+
+// Contains is used to check whether a list contains string s
+func Contains(list []string, s string) bool {
+    for _, v := range list {
+        if v == s {
+            return true
+        }
+    }
+    return false
+}
+
+// GetAnnotation returns the annotation value for a given key, or an empty string if not set
+func GetAnnotation(annotations map[string]string, key string) string {
+    if annotations == nil {
+        return ""
+    }
+    return annotations[key]
+}
+
+// GeneratePassword returns base64-encoded, securely generated random bytes.
+func GeneratePassword(n int) (string, error) {
+    b := make([]byte, n)
+    _, err := rand.Read(b)
+    if err != nil {
+        return "", err
+    }
+
+    return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// ProxyEnvVarsAreSet reports whether any of the cluster-wide proxy environment variables is set.
+// OLM handles these environment variables as a unit;
+// if at least one of them is set, all three are considered overridden
+// and the cluster-wide defaults are not used for the deployments of the subscribed Operator.
+// https://docs.openshift.com/container-platform/4.6/operators/admin/olm-configuring-proxy-support.html
+func ProxyEnvVarsAreSet() bool {
+    if os.Getenv("HTTP_PROXY") != "" || os.Getenv("HTTPS_PROXY") != "" || os.Getenv("NO_PROXY") != "" {
+        return true
+    }
+    return false
+}
+
+// RemoveDuplicates returns a new slice with duplicate entries removed, preserving order.
+func RemoveDuplicates(elements []string) []string {
+    // Use map to record duplicates as we find them.
+    encountered := map[string]struct{}{}
+    result := []string{}
+
+    for _, v := range elements {
+        if _, found := encountered[v]; found {
+            continue
+        }
+        encountered[v] = struct{}{}
+        result = append(result, v)
+    }
+    // Return the new slice.
+    return result
+}
+
+func RegisterDebugEndpoint(register func(string, http.Handler) error) error {
+    err := register("/debug/", http.Handler(http.DefaultServeMux))
+    if err != nil {
+        return err
+    }
+    err = register("/debug/pprof/", http.HandlerFunc(pprof.Index))
+    if err != nil {
+        return err
+    }
+    err = register("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
+    if err != nil {
+        return err
+    }
+    err = register("/debug/pprof/block", http.Handler(pprof.Handler("block")))
+    if err != nil {
+        return err
+    }
+    err = register("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
+    if err != nil {
+        return err
+    }
+    err = register("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
+    if err != nil {
+        return err
+    }
+    err = register("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
diff --git a/proxy/Dockerfile b/proxy/Dockerfile
new file mode 100644
index 000000000..314480742
--- /dev/null
+++ b/proxy/Dockerfile
@@ -0,0 +1,49 @@
+# Copyright Contributors to the Open Cluster Management project
+
+FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder
+
+WORKDIR /workspace
+COPY go.sum go.mod ./
+COPY ./proxy ./proxy
+
+RUN CGO_ENABLED=0 go build -a -installsuffix cgo -v -i -o main proxy/cmd/main.go
+
+FROM registry.access.redhat.com/ubi8/ubi-minimal:latest
+
+ARG VCS_REF
+ARG VCS_URL
+ARG IMAGE_NAME
+ARG IMAGE_DESCRIPTION
+ARG IMAGE_DISPLAY_NAME
+ARG IMAGE_NAME_ARCH
+ARG IMAGE_MAINTAINER
+ARG IMAGE_VENDOR
+ARG IMAGE_VERSION
+ARG IMAGE_RELEASE
+ARG IMAGE_SUMMARY
+ARG IMAGE_OPENSHIFT_TAGS
+
+LABEL org.label-schema.vendor="Red Hat" \
+      org.label-schema.name="$IMAGE_NAME_ARCH" \
+      org.label-schema.description="$IMAGE_DESCRIPTION" \
+      org.label-schema.vcs-ref=$VCS_REF \
+      org.label-schema.vcs-url=$VCS_URL \
+      org.label-schema.license="Red Hat Advanced Cluster Management for Kubernetes EULA" \
+      org.label-schema.schema-version="1.0" \
+      name="$IMAGE_NAME" \
+      maintainer="$IMAGE_MAINTAINER" \
+      vendor="$IMAGE_VENDOR" \
+      version="$IMAGE_VERSION" \
+      release="$IMAGE_RELEASE" \
+      description="$IMAGE_DESCRIPTION" \
+      summary="$IMAGE_SUMMARY" \
+      io.k8s.display-name="$IMAGE_DISPLAY_NAME" \
+      io.k8s.description="$IMAGE_DESCRIPTION" \
+      io.openshift.tags="$IMAGE_OPENSHIFT_TAGS"
+
+WORKDIR /
+COPY --from=builder /workspace/main rbac-query-proxy
+
+EXPOSE 3002
+
+ENTRYPOINT ["/rbac-query-proxy"]
diff --git a/proxy/OWNERS b/proxy/OWNERS
new file mode 100644
index 000000000..4d764aa64
--- /dev/null
+++ b/proxy/OWNERS
@@ -0,0 +1,10 @@
+approvers:
+  - clyang82
+  - marcolan018
+  - bjoydeep
+  - songleo
+reviewers:
+  - clyang82
+  - marcolan018
+  - bjoydeep
+  - songleo
\ No newline at end of file
diff --git a/proxy/README.md b/proxy/README.md
new file mode 100644
index 000000000..e64eda925
--- /dev/null
+++ b/proxy/README.md
@@ -0,0 +1,15 @@
+# rbac-query-proxy
+
+The rbac-query-proxy is a small HTTP reverse proxy that performs RBAC authorization against the API server. It is a helper service that acts as a multicluster metrics RBAC proxy.
+
+## Prerequisites
+
+- You must install [Open Cluster Management Observability](https://github.com/stolostron/multicluster-observability-operator)
+
+## How to build the image
+
+```
+$ docker build -f Dockerfile.prow -t rbac-query-proxy:latest .
+```
+
+Now, you can use this image to replace the rbac-query-proxy component and verify your PRs.
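+For a quick local check, the sketch below shows one way to run the freshly built image outside the cluster. It relies on the `--listen-address` and `--metrics-server` flags defined in proxy/cmd/main.go and assumes controller-runtime honors the `KUBECONFIG` environment variable; the kubeconfig mount path and the metrics server host are illustrative placeholders, not fixed values.
+
+```
+$ docker run --rm -p 3002:3002 \
+    -v $HOME/.kube/config:/tmp/kubeconfig:ro \
+    -e KUBECONFIG=/tmp/kubeconfig \
+    rbac-query-proxy:latest \
+    --listen-address=0.0.0.0:3002 \
+    --metrics-server=https://<observatorium-api-host>:8080
+```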
diff --git a/proxy/cmd/main.go b/proxy/cmd/main.go new file mode 100644 index 000000000..91d010ef6 --- /dev/null +++ b/proxy/cmd/main.go @@ -0,0 +1,70 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package main + +import ( + "flag" + "net/http" + "os" + + "github.com/spf13/pflag" + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client/config" + + "github.com/stolostron/multicluster-observability-operator/proxy/pkg/proxy" + "github.com/stolostron/multicluster-observability-operator/proxy/pkg/util" + clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" +) + +const ( + defaultListenAddress = "0.0.0.0:3002" +) + +type proxyConf struct { + listenAddress string + metricServer string + kubeconfigLocation string +} + +func main() { + + cfg := proxyConf{} + + klogFlags := flag.NewFlagSet(os.Args[0], flag.ExitOnError) + klog.InitFlags(klogFlags) + flagset := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError) + flagset.AddGoFlagSet(klogFlags) + + flagset.StringVar(&cfg.listenAddress, "listen-address", + defaultListenAddress, "The address HTTP server should listen on.") + flagset.StringVar(&cfg.metricServer, "metrics-server", "", + "The address the metrics server should run on.") + + _ = flagset.Parse(os.Args[1:]) + if err := os.Setenv("METRICS_SERVER", cfg.metricServer); err != nil { + klog.Fatalf("failed to Setenv: %v", err) + } + + //Kubeconfig flag + flagset.StringVar(&cfg.kubeconfigLocation, "kubeconfig", "", + "Path to a kubeconfig file. If unset, in-cluster configuration will be used") + + klog.Infof("proxy server will running on: %s", cfg.listenAddress) + klog.Infof("metrics server is: %s", cfg.metricServer) + klog.Infof("kubeconfig is: %s", cfg.kubeconfigLocation) + + clusterClient, err := clusterclientset.NewForConfig(config.GetConfigOrDie()) + if err != nil { + klog.Fatalf("failed to new cluster clientset: %v", err) + } + + // watch all managed clusters + go util.WatchManagedCluster(clusterClient) + go util.CleanExpiredProjectInfo(24 * 60 * 60) + + http.HandleFunc("/", proxy.HandleRequestAndRedirect) + if err := http.ListenAndServe(cfg.listenAddress, nil); err != nil { + klog.Fatalf("failed to ListenAndServe: %v", err) + } +} diff --git a/proxy/deploy/cluster-role-binding.yaml b/proxy/deploy/cluster-role-binding.yaml new file mode 100644 index 000000000..eeb01d29f --- /dev/null +++ b/proxy/deploy/cluster-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: rbac-query-proxy +subjects: +- kind: ServiceAccount + name: rbac-query-proxy + namespace: open-cluster-management +roleRef: + kind: ClusterRole + name: rbac-query-proxy + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/proxy/deploy/cluster-role.yaml b/proxy/deploy/cluster-role.yaml new file mode 100644 index 000000000..aa50d730a --- /dev/null +++ b/proxy/deploy/cluster-role.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rbac-query-proxy +rules: +- apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + verbs: + - watch diff --git a/proxy/deploy/deployment.yaml b/proxy/deploy/deployment.yaml new file mode 100644 index 000000000..38d64fe4a --- /dev/null +++ b/proxy/deploy/deployment.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rbac-query-proxy +spec: + replicas: 1 + selector: + matchLabels: + app: rbac-query-proxy + template: + 
metadata: + labels: + app: rbac-query-proxy + spec: + serviceAccountName: rbac-query-proxy + containers: + - name: rbac-query-proxy + image: blue0/rbac-query-proxy:latest + args: + - "--listen-address=0.0.0.0:8080" + - "--metrics-server=https://observability-observatorium-observatorium-api.open-cluster-management-observability.svc.cluster.local:8080" + ports: + - containerPort: 8080 + name: http + volumeMounts: + - name: ca-certs + mountPath: /var/rbac_proxy/ca + - name: client-certs + mountPath: /var/rbac_proxy/certs + volumes: + - name: ca-certs + secret: + secretName: observability-server-certs + - name: client-certs + secret: + secretName: observability-grafana-certs \ No newline at end of file diff --git a/proxy/deploy/kustomization.yaml b/proxy/deploy/kustomization.yaml new file mode 100644 index 000000000..86b72fc7d --- /dev/null +++ b/proxy/deploy/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- cluster-role-binding.yaml +- cluster-role.yaml +- deployment.yaml +- service-account.yaml +- service.yaml diff --git a/proxy/deploy/service-account.yaml b/proxy/deploy/service-account.yaml new file mode 100644 index 000000000..4d301bdbe --- /dev/null +++ b/proxy/deploy/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rbac-query-proxy + namespace: open-cluster-management \ No newline at end of file diff --git a/proxy/deploy/service.yaml b/proxy/deploy/service.yaml new file mode 100644 index 000000000..a728f1523 --- /dev/null +++ b/proxy/deploy/service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: rbac-query-proxy + namespace: open-cluster-management +spec: + ports: + - name: http + port: 8080 + targetPort: http + selector: + app: rbac-query-proxy \ No newline at end of file diff --git a/proxy/examples/managedcluster/cluster1.yaml b/proxy/examples/managedcluster/cluster1.yaml new file mode 100644 index 000000000..37746dafd --- /dev/null +++ b/proxy/examples/managedcluster/cluster1.yaml @@ -0,0 +1,9 @@ +apiVersion: cluster.open-cluster-management.io/v1 +kind: ManagedCluster +metadata: + name: cluster1 +spec: + hubAcceptsClient: true + managedClusterClientConfigs: + - caBundle: cluster1 + url: https://cluster1.com diff --git a/proxy/examples/managedcluster/cluster2.yaml b/proxy/examples/managedcluster/cluster2.yaml new file mode 100644 index 000000000..99c339dd5 --- /dev/null +++ b/proxy/examples/managedcluster/cluster2.yaml @@ -0,0 +1,9 @@ +apiVersion: cluster.open-cluster-management.io/v1 +kind: ManagedCluster +metadata: + name: cluster2 +spec: + hubAcceptsClient: true + managedClusterClientConfigs: + - caBundle: cluster2 + url: https://cluster2.com diff --git a/proxy/examples/rbac/README.md b/proxy/examples/rbac/README.md new file mode 100644 index 000000000..a17dffcef --- /dev/null +++ b/proxy/examples/rbac/README.md @@ -0,0 +1,23 @@ +## rbac + +- admin-rbac.yaml: assign `admin` to cluster manager +- user1-rbac.yaml: assign `user1` to `cluster1` manager +- user2-rbac.yaml: assign `user2` to `cluster2` manager + +``` +$ oc apply -f admin-rbac.yaml -f user1-rbac.yaml -f user2-rbac.yaml +$ oc apply -f cluster1.yaml -f cluster2.yaml +$ oc login -u admin -p admin +$ oc get managedclusters +NAME AGE +cluster1 12m +cluster2 12m +$ oc login -u user1 -p user1 +$ oc get managedclusters cluster1 +NAME AGE +cluster1 16m +$ oc login -u user2 -p user2 +$ oc get managedclusters cluster2 +NAME AGE +cluster2 16m +``` \ No newline at end of file diff --git a/proxy/examples/rbac/admin-rbac.yaml 
b/proxy/examples/rbac/admin-rbac.yaml new file mode 100644 index 000000000..6035682db --- /dev/null +++ b/proxy/examples/rbac/admin-rbac.yaml @@ -0,0 +1,27 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:cluster-manager-admin +rules: +- apiGroups: ["cluster.open-cluster-management.io"] + resources: ["managedclusters"] + verbs: ["*"] +- apiGroups: ["hive.openshift.io"] + resources: ["*"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["*"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:cluster-manager-admin +subjects: +- kind: User + name: admin + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: open-cluster-management:cluster-manager-admin + apiGroup: rbac.authorization.k8s.io diff --git a/proxy/examples/rbac/user1-rbac.yaml b/proxy/examples/rbac/user1-rbac.yaml new file mode 100644 index 000000000..609568d99 --- /dev/null +++ b/proxy/examples/rbac/user1-rbac.yaml @@ -0,0 +1,47 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:admin:managed-cluster-cluster1 +rules: +- apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + resourceNames: + - cluster1 + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + resourceNames: + - cluster1 + - default + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:admin:managed-cluster-cluster1 +subjects: +- kind: User + name: user1 + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: open-cluster-management:admin:managed-cluster-cluster1 + apiGroup: rbac.authorization.k8s.io diff --git a/proxy/examples/rbac/user2-rbac.yaml b/proxy/examples/rbac/user2-rbac.yaml new file mode 100644 index 000000000..9d7b153ba --- /dev/null +++ b/proxy/examples/rbac/user2-rbac.yaml @@ -0,0 +1,47 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: open-cluster-management:admin:managed-cluster-cluster2 +rules: +- apiGroups: + - cluster.open-cluster-management.io + resources: + - managedclusters + resourceNames: + - cluster2 + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - namespaces + resourceNames: + - cluster2 + - default + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: open-cluster-management:admin:managed-cluster-cluster2 +subjects: +- kind: User + name: user2 + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: open-cluster-management:admin:managed-cluster-cluster2 + apiGroup: rbac.authorization.k8s.io diff --git a/proxy/pkg/proxy/proxy.go b/proxy/pkg/proxy/proxy.go new file mode 100644 index 000000000..a05377ea6 --- /dev/null +++ b/proxy/pkg/proxy/proxy.go @@ -0,0 +1,155 @@ +// Copyright (c) 2021 Red Hat, Inc. 
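+//
+// This file implements the reverse-proxy handler. preCheckRequest validates the
+// X-Forwarded-Access-Token (or Authorization) header and resolves the user name
+// and the user's project list (cached in pkg/util); requests that fail the check
+// get an empty Prometheus matrix response instead of being forwarded. Valid
+// requests have their query parameters rewritten against the user's clusters,
+// are re-rooted under /api/metrics/v1/default, and are proxied to the address in
+// METRICS_SERVER using the mTLS transport built in tls.go.
+//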
+// Copyright Contributors to the Open Cluster Management project + +package proxy + +import ( + "bytes" + "compress/gzip" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "net/url" + "os" + "path" + "strings" + + "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/client/config" + + "github.com/stolostron/multicluster-observability-operator/proxy/pkg/util" +) + +const ( + basePath = "/api/metrics/v1/default" + projectsAPIPath = "/apis/project.openshift.io/v1/projects" + userAPIPath = "/apis/user.openshift.io/v1/users/~" +) + +var ( + serverScheme = "" + serverHost = "" +) + +// HandleRequestAndRedirect is used to init proxy handler +func HandleRequestAndRedirect(res http.ResponseWriter, req *http.Request) { + if preCheckRequest(req) != nil { + _, err := res.Write(newEmptyMatrixHTTPBody()) + if err != nil { + klog.Errorf("failed to write response: %v", err) + } + return + } + + serverURL, err := url.Parse(os.Getenv("METRICS_SERVER")) + if err != nil { + klog.Errorf("failed to parse url: %v", err) + } + serverHost = serverURL.Host + serverScheme = serverURL.Scheme + + tlsTransport, err := getTLSTransport() + if err != nil { + klog.Fatalf("failed to create tls transport: %v", err) + } + + // create the reverse proxy + proxy := httputil.ReverseProxy{ + Director: proxyRequest, + Transport: tlsTransport, + } + + req.Header.Set("X-Forwarded-Host", req.Header.Get("Host")) + req.Host = serverURL.Host + req.URL.Path = path.Join(basePath, req.URL.Path) + util.ModifyMetricsQueryParams(req, config.GetConfigOrDie().Host+projectsAPIPath) + proxy.ServeHTTP(res, req) +} + +func errorHandle(rw http.ResponseWriter, req *http.Request, err error) { + token := req.Header.Get("X-Forwarded-Access-Token") + if token == "" { + rw.WriteHeader(http.StatusUnauthorized) + } +} + +func preCheckRequest(req *http.Request) error { + token := req.Header.Get("X-Forwarded-Access-Token") + if token == "" { + token = req.Header.Get("Authorization") + if token == "" { + return errors.New("found unauthorized user") + } else { + req.Header.Set("X-Forwarded-Access-Token", token) + } + } + + userName := req.Header.Get("X-Forwarded-User") + if userName == "" { + userName = util.GetUserName(token, config.GetConfigOrDie().Host+userAPIPath) + if userName == "" { + return errors.New("failed to found user name") + } else { + req.Header.Set("X-Forwarded-User", userName) + } + } + + projectList, ok := util.GetUserProjectList(token) + if !ok { + projectList = util.FetchUserProjectList(token, config.GetConfigOrDie().Host+projectsAPIPath) + up := util.NewUserProject(userName, token, projectList) + util.UpdateUserProject(up) + } + + if len(projectList) == 0 || len(util.GetAllManagedClusterNames()) == 0 { + return errors.New("no project or cluster found") + } + + return nil +} + +func newEmptyMatrixHTTPBody() []byte { + var bodyBuff bytes.Buffer + gz := gzip.NewWriter(&bodyBuff) + if _, err := gz.Write([]byte(`{"status":"success","data":{"resultType":"matrix","result":[]}}`)); err != nil { + klog.Errorf("failed to write body: %v", err) + } + + if err := gz.Close(); err != nil { + klog.Errorf("failed to close gzip writer: %v", err) + } + + var gzipBuff bytes.Buffer + err := gzipWrite(&gzipBuff, bodyBuff.Bytes()) + if err != nil { + klog.Errorf("failed to write with gizp: %v", err) + } + + return gzipBuff.Bytes() +} + +func gzipWrite(w io.Writer, data []byte) error { + gw, err := gzip.NewWriterLevel(w, gzip.BestSpeed) + if err != nil { + return err + } + defer gw.Close() + _, err = gw.Write(data) + return err +} + +func 
proxyRequest(r *http.Request) { + r.URL.Scheme = serverScheme + r.URL.Host = serverHost + if r.Method == http.MethodGet { + if strings.HasSuffix(r.URL.Path, "/api/v1/query") || + strings.HasSuffix(r.URL.Path, "/api/v1/query_range") || + strings.HasSuffix(r.URL.Path, "/api/v1/series") { + r.Method = http.MethodPost + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + r.Body = ioutil.NopCloser(strings.NewReader(r.URL.RawQuery)) + } + } +} diff --git a/proxy/pkg/proxy/proxy_test.go b/proxy/pkg/proxy/proxy_test.go new file mode 100644 index 000000000..d818cc187 --- /dev/null +++ b/proxy/pkg/proxy/proxy_test.go @@ -0,0 +1,185 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package proxy + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "log" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/stolostron/multicluster-observability-operator/proxy/pkg/util" +) + +func TestNewEmptyMatrixHTTPBody(t *testing.T) { + body := newEmptyMatrixHTTPBody() + gr, err := gzip.NewReader(bytes.NewBuffer([]byte(body))) + defer gr.Close() + data, err := ioutil.ReadAll(gr) + if err != nil { + log.Fatal(err) + } + + var decompressedBuff bytes.Buffer + gr, err = gzip.NewReader(bytes.NewBuffer([]byte(data))) + defer gr.Close() + data, err = ioutil.ReadAll(gr) + if err != nil { + t.Errorf("failed to ReadAll: %v", err) + } + + decompressedBuff.Write(data) + emptyMatrix := `{"status":"success","data":{"resultType":"matrix","result":[]}}` + if decompressedBuff.String() != emptyMatrix { + t.Errorf("(%v) is not the expected: (%v)", decompressedBuff.String(), emptyMatrix) + } +} + +type FakeResponse struct { + t *testing.T + headers http.Header + body []byte + status int +} + +func NewFakeResponse(t *testing.T) *FakeResponse { + return &FakeResponse{ + t: t, + headers: make(http.Header), + } +} + +func (r *FakeResponse) Header() http.Header { + return r.headers +} + +func (r *FakeResponse) Write(body []byte) (int, error) { + r.body = body + return len(body), nil +} + +func (r *FakeResponse) WriteHeader(status int) { + r.status = status +} + +func TestErrorHandle(t *testing.T) { + req, _ := http.NewRequest("GET", "http://127.0.0.1:3002/metrics/query?query=foo", nil) + req.Header.Set("X-Forwarded-User", "test") + var err error + fakeResp := NewFakeResponse(t) + errorHandle(fakeResp, req, err) + if fakeResp.status != http.StatusUnauthorized { + t.Errorf("failed to get expected status: %v", fakeResp.status) + } +} + +func TestPreCheckRequest(t *testing.T) { + req, _ := http.NewRequest("GET", "http://127.0.0.1:3002/metrics/query?query=foo", nil) + resp := http.Response{ + Body: ioutil.NopCloser(bytes.NewBufferString("test")), + Header: make(http.Header), + Request: req, + } + resp.Request.Header.Set("X-Forwarded-Access-Token", "test") + resp.Request.Header.Set("X-Forwarded-User", "test") + util.InitUserProjectInfo() + up := util.NewUserProject("test", "test", []string{"p"}) + util.UpdateUserProject(up) + util.InitAllManagedClusterNames() + clusters := util.GetAllManagedClusterNames() + clusters["p"] = "p" + err := preCheckRequest(req) + if err != nil { + t.Errorf("failed to test preCheckRequest: %v", err) + } + + resp.Request.Header.Del("X-Forwarded-Access-Token") + resp.Request.Header.Add("Authorization", "test") + err = preCheckRequest(req) + if err != nil { + t.Errorf("failed to test preCheckRequest with bear token: %v", err) + } + + resp.Request.Header.Del("X-Forwarded-User") + err = preCheckRequest(req) + if 
!strings.Contains(err.Error(), "failed to found user name") { + t.Errorf("failed to test preCheckRequest: %v", err) + } + + resp.Request.Header.Del("X-Forwarded-Access-Token") + resp.Request.Header.Del("Authorization") + err = preCheckRequest(req) + if !strings.Contains(err.Error(), "found unauthorized user") { + t.Errorf("failed to test preCheckRequest: %v", err) + } + +} + +func TestGzipWrite(t *testing.T) { + originalStr := "test" + var compressedBuff bytes.Buffer + err := gzipWrite(&compressedBuff, []byte(originalStr)) + if err != nil { + t.Errorf("failed to compressed: %v", err) + } + var decompressedBuff bytes.Buffer + gr, err := gzip.NewReader(bytes.NewBuffer(compressedBuff.Bytes())) + defer gr.Close() + data, err := ioutil.ReadAll(gr) + if err != nil { + t.Errorf("failed to decompressed: %v", err) + } + decompressedBuff.Write(data) + if decompressedBuff.String() != originalStr { + t.Errorf("(%v) is not the expected: (%v)", originalStr, decompressedBuff.String()) + } +} + +func TestProxyRequest(t *testing.T) { + req := http.Request{} + req.URL = &url.URL{} + req.Header = http.Header(map[string][]string{}) + proxyRequest(&req) + if req.Body != nil { + t.Errorf("(%v) is not the expected nil", req.Body) + } + if req.Header.Get("Content-Type") != "" { + t.Errorf("(%v) is not the expected: (\"\")", req.Header.Get("Content-Type")) + } + + req.Method = http.MethodGet + pathList := []string{ + "/api/v1/query", + "/api/v1/query_range", + "/api/v1/series", + } + + for _, path := range pathList { + req.URL.Path = path + proxyRequest(&req) + if req.Method != http.MethodPost { + t.Errorf("(%v) is not the expected: (%v)", http.MethodPost, req.Method) + } + + if req.Header.Get("Content-Type") != "application/x-www-form-urlencoded" { + t.Errorf("(%v) is not the expected: (%v)", req.Header.Get("Content-Type"), "application/x-www-form-urlencoded") + } + + if req.Body == nil { + t.Errorf("(%v) is not the expected non-nil", req.Body) + } + + if req.URL.Scheme != "" { + t.Errorf("(%v) is not the expected \"\"", req.URL.Scheme) + } + + if req.URL.Host != "" { + t.Errorf("(%v) is not the expected \"\"", req.URL.Host) + } + } +} diff --git a/proxy/pkg/proxy/tls.go b/proxy/pkg/proxy/tls.go new file mode 100644 index 000000000..a3d7a714e --- /dev/null +++ b/proxy/pkg/proxy/tls.go @@ -0,0 +1,61 @@ +// Copyright (c) 2021 Red Hat, Inc. 
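+//
+// getTLSTransport builds the HTTPS transport used to reach the Observatorium API:
+// the server CA is read from /var/rbac_proxy/ca/ca.crt and the client certificate
+// and key from /var/rbac_proxy/certs, the paths where the deployment mounts the
+// observability-server-certs and observability-grafana-certs secrets.
+//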
+// Copyright Contributors to the Open Cluster Management project + +package proxy + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "net/http" + "path" + "path/filepath" + "time" + + "k8s.io/klog" +) + +const ( + caPath = "/var/rbac_proxy/ca" + certPath = "/var/rbac_proxy/certs" +) + +func getTLSTransport() (*http.Transport, error) { + + caCertFile := path.Join(caPath, "ca.crt") + tlsKeyFile := path.Join(certPath, "tls.key") + tlsCrtFile := path.Join(certPath, "tls.crt") + + // Load Server CA cert + caCert, err := ioutil.ReadFile(filepath.Clean(caCertFile)) + if err != nil { + klog.Error("failed to load server ca cert file") + return nil, err + } + // Load client cert signed by Client CA + cert, err := tls.LoadX509KeyPair(filepath.Clean(tlsCrtFile), filepath.Clean(tlsKeyFile)) + if err != nil { + klog.Error("failed to load client cert/key") + return nil, err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + // Setup HTTPS client + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, + } + return &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 300 * time.Second, + }).Dial, + TLSHandshakeTimeout: 30 * time.Second, + ResponseHeaderTimeout: 300 * time.Second, + DisableKeepAlives: true, + TLSClientConfig: tlsConfig, + }, nil +} diff --git a/proxy/pkg/rewrite/rewrite.go b/proxy/pkg/rewrite/rewrite.go new file mode 100644 index 000000000..af40b635d --- /dev/null +++ b/proxy/pkg/rewrite/rewrite.go @@ -0,0 +1,52 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package rewrite + +import ( + "regexp" + "strings" + + "github.com/prometheus-community/prom-label-proxy/injectproxy" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql/parser" + "k8s.io/klog" +) + +const ( + placeholderMetrics = "acm_metrics_placeholder" +) + +// InjectLabels is used to inject addtional label filters into original query +func InjectLabels(query string, label string, values []string) (string, error) { + + reg := regexp.MustCompile(`([{|,][ ]*)(` + label + `[ ]*)(=|!=|=~|!~)([ ]*"[^"]+")`) + query = reg.ReplaceAllString(query, "$1 "+placeholderMetrics+" $3$4") + + expr, err := parser.ParseExpr(query) + if err != nil { + klog.Errorf("Failed to parse the query %s: %v", query, err) + return "", err + } + + matchType := labels.MatchRegexp + if len(values) == 1 { + matchType = labels.MatchEqual + } + err = injectproxy.NewEnforcer([]*labels.Matcher{ + { + Name: label, + Type: matchType, + Value: strings.Join(values[:], "|"), + }, + }...).EnforceNode(expr) + if err != nil { + klog.Errorf("Failed to inject the label filters: %v", err) + return "", err + } + + query = strings.Replace(expr.String(), placeholderMetrics, label, -1) + klog.Infof("Query string after filter inject: %s", query) + + return query, nil +} diff --git a/proxy/pkg/rewrite/rewrite_test.go b/proxy/pkg/rewrite/rewrite_test.go new file mode 100644 index 000000000..7d687536f --- /dev/null +++ b/proxy/pkg/rewrite/rewrite_test.go @@ -0,0 +1,84 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package rewrite + +import "testing" + +func TestInjectLabels(t *testing.T) { + caseList := []struct { + name string + query string + label string + values []string + expected string + }{ + { + name: "No metrics specified", + query: `{key="value"}`, + label: "cluster", + values: []string{"A"}, + expected: `{cluster="A",key="value"}`, + }, + { + name: "Only metrics name", + query: "test_metrics", + label: "cluster", + values: []string{"A"}, + expected: `test_metrics{cluster="A"}`, + }, + { + name: "Metrics with label", + query: `test_metrics{key="value"}`, + label: "cluster", + values: []string{"A"}, + expected: `test_metrics{cluster="A",key="value"}`, + }, + { + name: "Multiple values", + query: `test_metrics{key="value"}`, + label: "cluster", + values: []string{"A", "B"}, + expected: `test_metrics{cluster=~"A|B",key="value"}`, + }, + { + name: "Existing label for cluster", + query: `test_metrics{cluster="A"}`, + label: "cluster", + values: []string{"A", "B"}, + expected: `test_metrics{cluster="A",cluster=~"A|B"}`, + }, + { + name: "Existing label for cluster using different ops", + query: `test_metrics{cluster!="A",cluster=~"C|D",cluster!~"E|F"}`, + label: "cluster", + values: []string{"A", "B"}, + expected: `test_metrics{cluster!="A",cluster!~"E|F",cluster=~"C|D",cluster=~"A|B"}`, + }, + { + name: "Existing label for cluster and others", + query: `test_metrics{akey="value",cluster="A"}`, + label: "cluster", + values: []string{"A", "B"}, + expected: `test_metrics{cluster="A",akey="value",cluster=~"A|B"}`, + }, + { + name: "Blank in existing query", + query: `test_metrics{akey = "value", cluster = "A"}`, + label: "cluster", + values: []string{"A", "B"}, + expected: `test_metrics{cluster="A",akey="value",cluster=~"A|B"}`, + }, + } + + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + output, err := InjectLabels(c.query, c.label, c.values) + if err != nil { + t.Errorf("Encountered error during label injection: (%v)", err) + } else if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + }) + } +} diff --git a/proxy/pkg/util/user_project.go b/proxy/pkg/util/user_project.go new file mode 100644 index 000000000..0f9838fea --- /dev/null +++ b/proxy/pkg/util/user_project.go @@ -0,0 +1,77 @@ +// Copyright (c) 2021 Red Hat, Inc. 
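+//
+// This file keeps an in-memory cache of the projects each user can access, keyed
+// by the user's token. GetUserProjectList and UpdateUserProject guard the map with
+// a lock, and CleanExpiredProjectInfo runs on a ticker to drop entries older than
+// the given number of seconds (24 hours as wired up in cmd/main.go).
+//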
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "sync" + "time" + + "k8s.io/klog" +) + +var userProjectInfo *UserProjectInfo + +type UserProjectInfo struct { + sync.RWMutex + ProjectInfo map[string]UserProject +} + +type UserProject struct { + UserName string + Timestamp int64 + Token string + ProjectList []string +} + +func InitUserProjectInfo() { + userProjectInfo = new(UserProjectInfo) + userProjectInfo.ProjectInfo = map[string]UserProject{} +} + +func NewUserProject(userName string, token string, projects []string) UserProject { + up := UserProject{} + up.UserName = userName + up.Timestamp = time.Now().Unix() + up.Token = token + up.ProjectList = projects + return up +} + +func deleteUserProject(up UserProject) { + userProjectInfo.Lock() + delete(userProjectInfo.ProjectInfo, up.Token) + userProjectInfo.Unlock() +} + +func UpdateUserProject(up UserProject) { + userProjectInfo.Lock() + userProjectInfo.ProjectInfo[up.Token] = up + userProjectInfo.Unlock() +} + +func GetUserProjectList(token string) ([]string, bool) { + userProjectInfo.Lock() + up, ok := userProjectInfo.ProjectInfo[token] + userProjectInfo.Unlock() + if ok { + return up.ProjectList, true + } + return []string{}, false +} + +func CleanExpiredProjectInfo(expiredTimeSeconds int64) { + InitUserProjectInfo() + ticker := time.NewTicker(time.Duration(time.Second * time.Duration(expiredTimeSeconds))) + defer ticker.Stop() + + for { + <-ticker.C + for _, up := range userProjectInfo.ProjectInfo { + if time.Now().Unix()-up.Timestamp >= expiredTimeSeconds { + klog.Infof("clean %v project info", up.UserName) + deleteUserProject(up) + } + } + } +} diff --git a/proxy/pkg/util/user_project_test.go b/proxy/pkg/util/user_project_test.go new file mode 100644 index 000000000..0eaef3429 --- /dev/null +++ b/proxy/pkg/util/user_project_test.go @@ -0,0 +1,110 @@ +// Copyright (c) 2021 Red Hat, Inc. 
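+//
+// These tests swap in hand-built userProjectInfo caches; the expiry test starts
+// CleanExpiredProjectInfo with a one-second window and checks that stale entries
+// are removed while fresh ones survive.
+//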
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "strconv" + "testing" + "time" +) + +func TestGetUserProjectList(t *testing.T) { + testCaseList := []struct { + name string + token string + userProjectInfo *UserProjectInfo + expected bool + }{ + { + "should has user project", + "1", + &UserProjectInfo{ + ProjectInfo: map[string]UserProject{ + "1": UserProject{ + UserName: "user" + strconv.Itoa(1), + Timestamp: time.Now().Unix(), + Token: strconv.Itoa(1), + ProjectList: []string{"p" + strconv.Itoa(1)}, + }, + }, + }, + true, + }, + + { + "should has not user project", + "invalid", + &UserProjectInfo{ + ProjectInfo: map[string]UserProject{ + "1": UserProject{ + UserName: "user" + strconv.Itoa(1), + Timestamp: time.Now().Unix(), + Token: strconv.Itoa(1), + ProjectList: []string{"p" + strconv.Itoa(1)}, + }, + }, + }, + false, + }, + } + + for _, c := range testCaseList { + userProjectInfo = c.userProjectInfo + _, output := GetUserProjectList(c.token) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestCleanExpiredProjectInfo(t *testing.T) { + testCaseList := []struct { + name string + token string + userProjectInfo *UserProjectInfo + expected bool + }{ + { + "user project should expired", + "1", + &UserProjectInfo{ + ProjectInfo: map[string]UserProject{ + "1": UserProject{ + UserName: "user" + strconv.Itoa(1), + Timestamp: time.Now().Unix(), + Token: strconv.Itoa(1), + ProjectList: []string{"p" + strconv.Itoa(1)}, + }, + }, + }, + false, + }, + + { + "user project should not expired", + "2", + &UserProjectInfo{ + ProjectInfo: map[string]UserProject{ + "2": UserProject{ + UserName: "user" + strconv.Itoa(2), + Timestamp: time.Now().Unix() + 10, + Token: strconv.Itoa(2), + ProjectList: []string{"p" + strconv.Itoa(2)}, + }, + }, + }, + true, + }, + } + + go CleanExpiredProjectInfo(1) + for _, c := range testCaseList { + userProjectInfo = c.userProjectInfo + time.Sleep(time.Second * 2) + _, output := GetUserProjectList(c.token) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} diff --git a/proxy/pkg/util/util.go b/proxy/pkg/util/util.go new file mode 100644 index 000000000..00401daca --- /dev/null +++ b/proxy/pkg/util/util.go @@ -0,0 +1,287 @@ +// Copyright (c) 2021 Red Hat, Inc. 
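+//
+// Shared helpers for the proxy: WatchManagedCluster keeps an in-memory map of
+// managed cluster names via an informer, FetchUserProjectList and GetUserName call
+// the OpenShift project and user APIs with the user's token, and
+// ModifyMetricsQueryParams rewrites the "query" and "match[]" parameters so only
+// the clusters backing the user's projects are queried. writeError records
+// failures in /tmp/health so the liveness probe can restart a broken pod.
+//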
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + projectv1 "github.com/openshift/api/project/v1" + userv1 "github.com/openshift/api/user/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/client-go/tools/cache" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/proxy/pkg/rewrite" + clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +const ( + managedClusterAPIPath = "/apis/cluster.open-cluster-management.io/v1/managedclusters" + caPath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +) + +var allManagedClusterNames map[string]string + +func GetAllManagedClusterNames() map[string]string { + return allManagedClusterNames +} + +func InitAllManagedClusterNames() { + allManagedClusterNames = map[string]string{} +} + +// ModifyMetricsQueryParams will modify request url params for query metrics +func ModifyMetricsQueryParams(req *http.Request, url string) { + userName := req.Header.Get("X-Forwarded-User") + klog.V(1).Infof("user is %v", userName) + klog.V(1).Infof("URL is: %s", req.URL) + klog.V(1).Infof("URL path is: %v", req.URL.Path) + klog.V(1).Infof("URL RawQuery is: %v", req.URL.RawQuery) + token := req.Header.Get("X-Forwarded-Access-Token") + if token == "" { + klog.Errorf("failed to get token from http header") + } + + projectList, ok := GetUserProjectList(token) + klog.V(1).Infof("projectList from local mem cache = %v, ok = %v", projectList, ok) + if !ok { + projectList = FetchUserProjectList(token, url) + up := NewUserProject(userName, token, projectList) + UpdateUserProject(up) + klog.V(1).Infof("projectList from api server = %v", projectList) + } + + klog.V(1).Infof("cluster list: %v", allManagedClusterNames) + klog.V(1).Infof("user <%s> project list: %v", userName, projectList) + if canAccessAllClusters(projectList) { + klog.Infof("user <%v> have access to all clusters", userName) + return + } + + clusterList := getUserClusterList(projectList) + klog.Infof("user <%v> have access to these clusters: %v", userName, clusterList) + queryValues := req.URL.Query() + if len(queryValues) == 0 { + return + } + + queryValues = rewriteQuery(queryValues, clusterList, "query") + queryValues = rewriteQuery(queryValues, clusterList, "match[]") + req.URL.RawQuery = queryValues.Encode() + + queryValues = req.URL.Query() + klog.V(1).Info("modified URL is:") + klog.V(1).Infof("URL is: %s", req.URL) + klog.V(1).Infof("URL path is: %v", req.URL.Path) + klog.V(1).Infof("URL RawQuery is: %v", req.URL.RawQuery) + return +} + +// WatchManagedCluster will watch and save managedcluster when create/update/delete managedcluster +func WatchManagedCluster(clusterClient clusterclientset.Interface) { + InitAllManagedClusterNames() + watchlist := cache.NewListWatchFromClient(clusterClient.ClusterV1().RESTClient(), "managedclusters", v1.NamespaceAll, + fields.Everything()) + _, controller := cache.NewInformer( + watchlist, + &clusterv1.ManagedCluster{}, + time.Second*0, + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + clusterName := obj.(*clusterv1.ManagedCluster).Name + klog.Infof("added a managedcluster: %s \n", obj.(*clusterv1.ManagedCluster).Name) + allManagedClusterNames[clusterName] = clusterName + }, + + DeleteFunc: func(obj interface{}) { + 
clusterName := obj.(*clusterv1.ManagedCluster).Name + klog.Infof("deleted a managedcluster: %s \n", obj.(*clusterv1.ManagedCluster).Name) + delete(allManagedClusterNames, clusterName) + }, + + UpdateFunc: func(oldObj, newObj interface{}) { + clusterName := newObj.(*clusterv1.ManagedCluster).Name + klog.Infof("changed a managedcluster: %s \n", newObj.(*clusterv1.ManagedCluster).Name) + allManagedClusterNames[clusterName] = clusterName + }, + }, + ) + + stop := make(chan struct{}) + go controller.Run(stop) + for { + time.Sleep(time.Second * 30) + klog.V(1).Infof("found %v clusters", len(allManagedClusterNames)) + } +} + +func sendHTTPRequest(url string, verb string, token string) (*http.Response, error) { + req, err := http.NewRequest(verb, url, nil) + if err != nil { + klog.Errorf("failed to new http request: %v", err) + return nil, err + } + + if len(token) == 0 { + transport := &http.Transport{} + defaultClient := &http.Client{Transport: transport} + return defaultClient.Do(req) + } + + if !strings.HasPrefix(token, "Bearer ") { + token = "Bearer " + token + } + req.Header.Set("Authorization", token) + caCert, err := ioutil.ReadFile(filepath.Clean(caPath)) + if err != nil { + klog.Error("failed to load root ca cert file") + return nil, err + } + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, + }, + MaxIdleConns: 100, + IdleConnTimeout: 60 * time.Second, + } + + client := http.Client{Transport: tr} + return client.Do(req) +} + +func FetchUserProjectList(token string, url string) []string { + resp, err := sendHTTPRequest(url, "GET", token) + if err != nil { + klog.Errorf("failed to send http request: %v", err) + /* + This is adhoc step to make sure that if this error happens, + we can automatically restart the POD using liveness probe which checks for this file. + Once the real cause is determined and fixed, we will remove this. 
+ */ + writeError(fmt.Sprintf("failed to send http request: %v", err)) + return []string{} + } + defer resp.Body.Close() + + var projects projectv1.ProjectList + err = json.NewDecoder(resp.Body).Decode(&projects) + if err != nil { + klog.Errorf("failed to decode response json body: %v", err) + return []string{} + } + + projectList := make([]string, len(projects.Items)) + for idx, p := range projects.Items { + projectList[idx] = p.Name + } + + return projectList +} + +func GetUserName(token string, url string) string { + resp, err := sendHTTPRequest(url, "GET", token) + if err != nil { + klog.Errorf("failed to send http request: %v", err) + writeError(fmt.Sprintf("failed to send http request: %v", err)) + return "" + } + defer resp.Body.Close() + + user := userv1.User{} + err = json.NewDecoder(resp.Body).Decode(&user) + if err != nil { + klog.Errorf("failed to decode response json body: %v", err) + return "" + } + + return user.Name +} + +// Contains is used to check whether a list contains string s +func Contains(list []string, s string) bool { + for _, v := range list { + if v == s { + return true + } + } + return false +} + +// canAccessAllClusters check user have permission to access all clusters +func canAccessAllClusters(projectList []string) bool { + if len(allManagedClusterNames) == 0 && len(projectList) == 0 { + return false + } + + for name := range allManagedClusterNames { + if !Contains(projectList, name) { + return false + } + } + + return true +} + +func getUserClusterList(projectList []string) []string { + clusterList := []string{} + if len(projectList) == 0 { + return clusterList + } + + for _, projectName := range projectList { + clusterName, ok := allManagedClusterNames[projectName] + if ok { + clusterList = append(clusterList, clusterName) + } + } + + return clusterList +} + +func rewriteQuery(queryValues url.Values, clusterList []string, key string) url.Values { + originalQuery := queryValues.Get(key) + if len(originalQuery) == 0 { + return queryValues + } + + modifiedQuery, err := rewrite.InjectLabels(originalQuery, "cluster", clusterList) + if err != nil { + return queryValues + } + + queryValues.Del(key) + queryValues.Add(key, modifiedQuery) + return queryValues +} + +func writeError(msg string) { + f, err := os.OpenFile("/tmp/health", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + klog.Errorf("failed to create file for probe: %v", err) + } + + _, err = f.Write([]byte(msg)) + if err != nil { + klog.Errorf("failed to write error message to probe file: %v", err) + } + + _ = f.Close() +} diff --git a/proxy/pkg/util/util_test.go b/proxy/pkg/util/util_test.go new file mode 100644 index 000000000..7aaf86e9c --- /dev/null +++ b/proxy/pkg/util/util_test.go @@ -0,0 +1,281 @@ +// Copyright (c) 2021 Red Hat, Inc. 
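+//
+// The tests below start small fake HTTP servers on local ports to stand in for the
+// OpenShift project API, so FetchUserProjectList and the query-rewriting helpers
+// can be exercised without a real cluster.
+//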
+// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "io/ioutil" + "net/http" + "net/url" + "strings" + "testing" + "time" +) + +func newTTPRequest() *http.Request { + req, _ := http.NewRequest("GET", "http://127.0.0.1:3002/metrics/query?query=foo", nil) + req.Header.Set("X-Forwarded-User", "test") + return req +} + +func createFakeServerWithInvalidJSON(port string, t *testing.T) { + server := http.NewServeMux() + server.HandleFunc("/", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("invalid json")) + }, + ) + err := http.ListenAndServe(":"+port, server) + if err != nil { + t.Fatal("fail to create internal server at " + port) + } +} + +func createFakeServer(port string, t *testing.T) { + server := http.NewServeMux() + projectList := `{ + "kind": "ProjectList", + "apiVersion": "project.openshift.io/v1", + "metadata": { + "selfLink": "/apis/project.openshift.io/v1/projects" + }, + "items": [ + { + "metadata": { + "name": "c0", + "selfLink": "/apis/project.openshift.io/v1/projects/c0", + "uid": "2f68fd63-097c-4519-8e8f-823bb0106acc", + "resourceVersion": "7723", + "creationTimestamp": "2020-09-25T13:35:09Z", + "annotations": { + "openshift.io/sa.scc.mcs": "s0:c11,c10", + "openshift.io/sa.scc.supplemental-groups": "1000130000/10000", + "openshift.io/sa.scc.uid-range": "1000130000/10000" + } + }, + "spec": { + "finalizers": [ + "kubernetes" + ] + }, + "status": { + "phase": "Active" + } + }, + { + "metadata": { + "name": "c1", + "selfLink": "/apis/project.openshift.io/v1/projects/c1", + "uid": "bce1176f-6dda-45ee-99ef-675a64300643", + "resourceVersion": "59984227", + "creationTimestamp": "2020-11-26T08:34:15Z", + "annotations": { + "openshift.io/sa.scc.mcs": "s0:c25,c0", + "openshift.io/sa.scc.supplemental-groups": "1000600000/10000", + "openshift.io/sa.scc.uid-range": "1000600000/10000" + } + }, + "spec": { + "finalizers": [ + "kubernetes" + ] + }, + "status": { + "phase": "Active" + } + } + ] + }` + server.HandleFunc("/", + func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte(projectList)) + }, + ) + err := http.ListenAndServe(":"+port, server) + if err != nil { + t.Fatal("fail to create internal server at " + port) + } +} +func TestModifyMetricsQueryParams(t *testing.T) { + testCaseList := []struct { + name string + clusters map[string]string + expected int + }{ + {"1 cluster", map[string]string{"c0": "c0"}, 1}, + {"2 clusters", map[string]string{"c0": "c0", "c2": "c2"}, 2}, + {"no cluster", map[string]string{}, 0}, + } + allManagedClusterNames = map[string]string{"c0": "c0", "c1": "c1"} + for _, c := range testCaseList { + allManagedClusterNames = c.clusters + if len(GetAllManagedClusterNames()) != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, len(GetAllManagedClusterNames()), c.expected) + } + } + +} + +func TestGetAllManagedClusterNames(t *testing.T) { + testCaseList := []struct { + name string + clusters map[string]string + expected string + }{ + {"do not need modify params", map[string]string{"c0": "c0"}, "query=foo"}, + {"modify params with 1 cluster", map[string]string{"c0": "c0", "c2": "c2"}, `query=foo%7Bcluster%3D%22c0%22%7D`}, + {"modify params with all cluster", map[string]string{"c0": "c0", "c1": "c1"}, `query=foo`}, + {"no cluster", map[string]string{}, "query=foo"}, + } + go createFakeServer("3002", t) + time.Sleep(time.Second) + for _, c := range testCaseList { + allManagedClusterNames = c.clusters + req := newTTPRequest() + ModifyMetricsQueryParams(req, 
"http://127.0.0.1:3002/") + if req.URL.RawQuery != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, req.URL.RawQuery, c.expected) + } + } +} + +func TestContains(t *testing.T) { + testCaseList := []struct { + name string + list []string + s string + expected bool + }{ + {"contain sub string", []string{"a", "b"}, "a", true}, + {"shoud contain empty string", []string{""}, "", true}, + {"should not contain sub string", []string{"a", "b"}, "c", false}, + {"shoud not contain empty string", []string{"a", "b"}, "", false}, + } + + for _, c := range testCaseList { + output := Contains(c.list, c.s) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestRewriteQuery(t *testing.T) { + testCaseList := []struct { + name string + urlValue url.Values + clusterList []string + key string + expected string + }{ + { + "should not rewrite", + map[string][]string{}, + []string{"c1", "c2"}, + "key", + "", + }, + + { + "should rewrite", + map[string][]string{"key": []string{"value"}}, + []string{"c1", "c2"}, + "key", + "value{cluster=~\"c1|c2\"}", + }, + + { + "empty cluster list", + map[string][]string{"key": []string{"value"}}, + []string{}, + "key", + "value{cluster=~\"\"}", + }, + } + + for _, c := range testCaseList { + output := rewriteQuery(c.urlValue, c.clusterList, c.key) + if output.Get(c.key) != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestCanAccessAllClusters(t *testing.T) { + testCaseList := []struct { + name string + projectList []string + clusterList map[string]string + expected bool + }{ + {"no cluster and project", []string{}, map[string]string{}, false}, + {"should access all cluster", []string{"c1", "c2"}, map[string]string{"c1": "c1", "c2": "c2"}, true}, + {"should not access all cluster", []string{"c1"}, map[string]string{"c1": "c1", "c2": "c2"}, false}, + {"no project", []string{}, map[string]string{"c1": "c1", "c2": "c2"}, false}, + } + + for _, c := range testCaseList { + allManagedClusterNames = c.clusterList + output := canAccessAllClusters(c.projectList) + if output != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestFetchUserProjectList(t *testing.T) { + testCaseList := []struct { + name string + token string + url string + expected int + }{ + {"get 2 projects", "", "http://127.0.0.1:4002/", 2}, + {"invalid url", "", "http://127.0.0.1:300/", 0}, + } + go createFakeServer("4002", t) + time.Sleep(time.Second) + + for _, c := range testCaseList { + output := FetchUserProjectList(c.token, c.url) + if len(output) != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, len(output), c.expected) + } + } + + go createFakeServerWithInvalidJSON("5002", t) + output := FetchUserProjectList("", "http://127.0.0.1:5002/") + if len(output) != 0 { + t.Errorf("case (invalid json) output: (%v) is not the expected: (0)", len(output)) + } +} + +func TestGetUserClusterList(t *testing.T) { + testCaseList := []struct { + name string + projectList []string + clusterList map[string]string + expected int + }{ + {"no project", []string{}, map[string]string{}, 0}, + {"should get 1 cluster", []string{"c1", "c2"}, map[string]string{"c1": "c1"}, 1}, + {"should get 2 cluster", []string{"c1", "c2"}, map[string]string{"c1": "c1", "c2": "c2"}, 2}, + {"no cluster", []string{"c1"}, map[string]string{}, 0}, + } + + 
for _, c := range testCaseList { + allManagedClusterNames = c.clusterList + output := getUserClusterList(c.projectList) + if len(output) != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, output, c.expected) + } + } +} + +func TestWriteError(t *testing.T) { + writeError("test") + data, _ := ioutil.ReadFile("/tmp/health") + if !strings.Contains(string(data), "test") { + t.Errorf("failed to find the health file") + } +} diff --git a/sonar-project.properties b/sonar-project.properties new file mode 100644 index 000000000..8c70d3fb7 --- /dev/null +++ b/sonar-project.properties @@ -0,0 +1,10 @@ +sonar.projectKey=open-cluster-management_multicluster-observability-operator +sonar.projectName=multicluster-observability-operator +sonar.sources=. +sonar.exclusions=**/*_test.go,**/*_generated*.go,**/*_generated/**,**/vendor/** +sonar.tests=. +sonar.test.inclusions=**/*_test.go +sonar.test.exclusions=**/*_generated*.go,**/*_generated/**,**/vendor/** +sonar.go.tests.reportPaths=report.json +sonar.go.coverage.reportPaths=coverage.out +sonar.externalIssuesReportPaths=gosec.json diff --git a/tests/Dockerfile b/tests/Dockerfile new file mode 100644 index 000000000..fd6b4682e --- /dev/null +++ b/tests/Dockerfile @@ -0,0 +1,50 @@ +FROM registry.ci.openshift.org/open-cluster-management/builder:go1.17-linux AS builder + +# install oc into build image +RUN curl -fksSL https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.6.3/openshift-client-linux-4.6.3.tar.gz | tar -xvz -C /usr/local/ oc + +WORKDIR /workspace +# copy go tests into build image +COPY go.sum go.mod ./ +COPY ./tests ./tests + +# compile go tests in build image +RUN go get github.com/onsi/ginkgo/ginkgo@v1.14.2 && go mod vendor && ginkgo build ./tests/pkg/tests/ + +# create new docker image to hold built artifacts +FROM registry.fedoraproject.org/fedora-minimal:32 + +# run as root +USER root + +# expose env vars for runtime +ENV KUBECONFIG "/opt/.kube/config" +ENV IMPORT_KUBECONFIG "/opt/.kube/import-kubeconfig" +ENV OPTIONS "/resources/options.yaml" +ENV REPORT_FILE "/results/results.xml" +ENV GINKGO_DEFAULT_FLAGS "-slowSpecThreshold=120 -timeout 7200s" +ENV GINKGO_NODES "1" +ENV GINKGO_FLAGS="" +ENV GINKGO_FOCUS="" +ENV GINKGO_SKIP="Integration" +ENV SKIP_INTEGRATION_CASES="true" +ENV IS_CANARY_ENV="true" + +# install ginkgo into built image +COPY --from=builder /go/bin/ /usr/local/bin + +# copy oc into built image +COPY --from=builder /usr/local/oc /usr/local/bin/oc +RUN oc version + +WORKDIR /workspace/opt/tests/ +# copy compiled tests into built image +COPY --from=builder /workspace/tests/pkg/tests/tests.test ./observability-e2e-test.test +COPY ./examples /examples +COPY --from=builder /workspace/tests/format-results.sh . 
+ +VOLUME /results + + +# execute compiled ginkgo tests +CMD ["/bin/bash", "-c", "ginkgo --v --focus=${GINKGO_FOCUS} --skip=${GINKGO_SKIP} -nodes=${GINKGO_NODES} --reportFile=${REPORT_FILE} -x -debug -trace observability-e2e-test.test -- -v=3 ; ./format-results.sh ${REPORT_FILE}"] diff --git a/tests/OWNERS b/tests/OWNERS new file mode 100644 index 000000000..a21463147 --- /dev/null +++ b/tests/OWNERS @@ -0,0 +1,8 @@ +approvers: + - morvencao + - songleo +reviewers: + - clyang82 + - marcolan018 + - bjoydeep + - haoqing0110 diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 000000000..a1e7e57fe --- /dev/null +++ b/tests/README.md @@ -0,0 +1,276 @@ +# observability-e2e-test + +[![Build](https://img.shields.io/badge/build-Prow-informational)](https://prow.ci.openshift.org/?repo=stolostron%2F${observability-e2e-test}) + +This is modeled after: https://github.com/stolostron/open-cluster-management-e2e + +This is a container which will be called from: + +1. Canary Tests +2. Regular Build PRs + +The tests in this container will: + +1. Create the object store and MCO CR. +2. Wait for the the entire Observability suite (Hub and Addon) installed. +3. Then verify the Observability suite (Hub and Addon) are working as expected including disable/enable addon, grafana verify etc. + +The tests can be running both locally and in [Openshift CI(based on Prow)](https://docs.ci.openshift.org/) in the following two kinds of environment: + +1. a [KinD](https://kind.sigs.k8s.io/) cluster. +2. an OCP cluster with ACM installed with [deploy repo](https://github.com/stolostron/deploy). + +## Run e2e testing automatically + +The observability e2e testing can be running automatically in KinD cluster or OCP cluster. + +### Run locally in KinD cluster + +1. clone this repository and enter its root directory: + +``` +git clone git@github.com:stolostron/multicluster-observability-operator.git && cd multicluster-observability-operator +``` + +2. Optionally override the observability images to test the corresponding components by exporting the following environment variables before running e2e testing: + +| Component Name | Image Environment Variable | +| --- | --- | +| multicluster-observability-operator | MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF | +| rbac-query-proxy | RBAC_QUERY_PROXY_IMAGE_REF | +| metrics-collector | METRICS_COLLECTOR_IMAGE_REF | +| endpoint-monitoring-operator | ENDPOINT_MONITORING_OPERATOR_IMAGE_REF | +| grafana-dashboard-loader | GRAFANA_DASHBOARD_LOADER_IMAGE_REF | +| observatorium-operator | OBSERVATORIUM_OPERATOR_IMAGE_REF | + +For example, if you want to test `metrics-collector` image from `quay.io//metrics-collector:test`, then execute the following command: + +``` +export METRICS_COLLECTOR_IMAGE_REF=quay.io//metrics-collector:test +``` + +> _Note:_ By default, the command will try to install the Observability and its dependencies with images of latest [UPSTREAM snapshot tag](https://quay.io/repository/stolostron/acm-custom-registry?tab=tags). + +3. Then simply execute the following command to run e2e testing in a KinD cluster: + +``` +make e2e-tests-in-kind +``` + +### Run locally in OCP cluster + +If you only have an OCP cluster with ACM installed, then you can run observability e2e testing with the following steps. + +1. clone this repository and enter its root directory: + +``` +git clone git@github.com:stolostron/multicluster-observability-operator.git && cd multicluster-observability-operator +``` + +2. 
export `KUBECONFIG` environment variable to the kubeconfig of the OCP cluster: + +``` +export KUBECONFIG= +``` + +3. Optionally override the observability images to test the corresponding components by exporting the following environment variables before running e2e testing: + +| Component Name | Image Environment Variable | +| --- | --- | +| multicluster-observability-operator | MULTICLUSTER_OBSERVABILITY_OPERATOR_IMAGE_REF | +| rbac-query-proxy | RBAC_QUERY_PROXY_IMAGE_REF | +| metrics-collector | METRICS_COLLECTOR_IMAGE_REF | +| endpoint-monitoring-operator | ENDPOINT_MONITORING_OPERATOR_IMAGE_REF | +| grafana-dashboard-loader | GRAFANA_DASHBOARD_LOADER_IMAGE_REF | +| observatorium-operator | OBSERVATORIUM_OPERATOR_IMAGE_REF | + +4. Then simply execute the following command to run e2e testing: + +``` +make e2e-tests +``` + +## Run e2e testing manually + +If you want to run observability e2e testing manually, make sure you have cluster with ACM installed. + +## Running e2e testing manually + +1. clone this repository and enter its root directory: + +``` +git clone git@github.com:stolostron/multicluster-observability-operator.git && cd multicluster-observability-operator +``` + +2. Before running the e2e testing, make sure [ginkgo](https://github.com/onsi/ginkgo) is installed: + +``` +go install github.com/onsi/ginkgo/ginkgo@latest +``` + +3. Then copy `tests/resources/options.yaml.template` to `tests/resources/options.yaml`, and update values specific to your environment: + +``` +cp tests/resources/options.yaml.template tests/resources/options.yaml +cat tests/resources/options.yaml +options: + hub: + name: HUB_CLUSTER_NAME + baseDomain: BASE_DOMAIN +``` + +(optional) If there is an imported cluster in the test environment, need to add the cluster info into `options.yaml`: + +``` +cat tests/resources/options.yaml +options: + hub: + name: HUB_CLUSTER_NAME + baseDomain: BASE_DOMAIN + clusters: + - name: IMPORT_CLUSTER_NAME + baseDomain: IMPORT_CLUSTER_BASE_DOMAIN + kubecontext: IMPORT_CLUSTER_KUBE_CONTEXT +``` + +4. Then run e2e testing manually by executing the following command: + +``` +export BUCKET=YOUR_S3_BUCKET +export REGION=YOUR_S3_REGION +export AWS_ACCESS_KEY_ID=YOUR_S3_AWS_ACCESS_KEY_ID +export AWS_SECRET_ACCESS_KEY=YOUR_S3_AWS_SECRET_ACCESS_KEY +export KUBECONFIG=~/.kube/config +ginkgo -v tests/pkg/tests/ -- -options=../../resources/options.yaml -v=3 +``` + +(optional) If there is an imported cluster in the test environment, need to set more environment. + +``` +export IMPORT_KUBECONFIG=~/.kube/import-cluster-config +``` + +## Running e2e testing manually in docker container + +1. clone this repository and enter its root directory: + +``` +git clone git@github.com:stolostron/multicluster-observability-operator.git && cd multicluster-observability-operator +``` + +2. Optionally build docker image for observability e2e testing: + +``` +docker build -t observability-e2e-test:latest -f tests/Dockerfile . +``` + +3. 
Then copy `tests/resources/options.yaml.template` to `tests/resources/options.yaml`, and update values specific to your environment: + +``` +cp tests/resources/options.yaml.template tests/resources/options.yaml +cat tests/resources/options.yaml +options: + hub: + name: HUB_CLUSTER_NAME + baseDomain: BASE_DOMAIN +``` + +(optional)If there is an imported cluster in the test environment, need to add the cluster info into `options.yaml`: + +``` +cat tests/resources/options.yaml +options: + hub: + name: HUB_CLUSTER_NAME + baseDomain: BASE_DOMAIN + clusters: + - name: IMPORT_CLUSTER_NAME + baseDomain: IMPORT_CLUSTER_BASE_DOMAIN + kubecontext: IMPORT_CLUSTER_KUBE_CONTEXT +``` + +4. copy `tests/resources/env.list.template` to `tests/resources/env.list`, and update values specific to your s3 configuration: + +``` +cp tests/resources/env.list.template tests/resources/env.list +cat tests/resources/env.list +BUCKET=YOUR_S3_BUCKET +REGION=YOUR_S3_REGION +AWS_ACCESS_KEY_ID=YOUR_S3_AWS_ACCESS_KEY_ID +AWS_SECRET_ACCESS_KEY=YOUR_S3_AWS_SECRET_ACCESS_KEY +``` + +5. login to your cluster in which observability is enabled - and make sure that the kubeconfig is located as file `~/.kube/config`: + +``` +kubectl config current-context +admin +``` + +6. (optional) If there is an imported cluster in the test environment, you need to copy the kubeconfig file into as `~/.kube/` as `import-kubeconfig`: + +``` +cp {IMPORT_CLUSTER_KUBE_CONFIG_PATH} ~/.kube/import-kubeconfig +``` + +7. start to run e2e testing in docker container with the following command: + +``` +docker run -v ~/.kube/:/opt/.kube -v $(pwd)/tests/results:/results -v $(pwd)/tests/resources:/resources --env-file $(pwd)/tests/resources/env.list observability-e2e-test:latest +``` + +In Canary environment, this is the container that will be run - and all the volumes etc will passed on while starting the docker container using a helper script. + +## Contributing to E2E + +### Options.yaml + +The values in the options.yaml are optional values read in by E2E. If you do not set an option, the test case that depends on the option should skip the test. The sample values in the option.yaml.template should provide enough context for you fill in with the appropriate values. Further, in the section below, each test should document their test with some detail. + +### Skip install and uninstall + +For developing and testing purposes, you can set the following env to skip the install and uninstall steps to keep your current MCO instance. + +- SKIP_INSTALL_STEP: if set to `true`, the testing will skip the install step +- SKIP_UNINSTALL_STEP: if set to `true`, the testing will skip the uninstall step + +For example, run the following command will skip the install and uninstall step: + +``` +export SKIP_INSTALL_STEP=true +export SKIP_UNINSTALL_STEP=true +export BUCKET=YOUR_S3_BUCKET +export REGION=YOUR_S3_REGION +export AWS_ACCESS_KEY_ID=YOUR_S3_AWS_ACCESS_KEY_ID +export AWS_SECRET_ACCESS_KEY=YOUR_S3_AWS_SECRET_ACCESS_KEY +export KUBECONFIG=~/.kube/config +ginkgo -v -- -options=resources/options.yaml -v=3 +``` + +### Focus Labels + +* Each `It` specification should end with a label which helps automation segregate running of specs. +* The choice of labels is up to the contributor, with the one guideline, that the second label, be `g0-gN`, to indicate the `run level`, with `g0` denoting that this test runs within a few minutes, and `g5` denotes a testcase that will take > 30 minutes to complete. 
See examples below: + +` It("should have not the expected MCO addon pods (addon/g0)", func() {` + +Examples: + +```yaml + It("should have the expected args in compact pod (reconcile/g0)", func() { + It("should work in basic mode (reconcile/g0)", func() { + It("should have not the expected MCO addon pods (addon/g0)", func() { + It("should have not metric data (addon/g0)", func() { + It("should be able to access the grafana console (grafana/g0)", func() { + It("should have metric data in grafana console (grafana/g0)", func() { + .... +``` + +* The `--focus` and `--skip` are ginkgo directives that allow you to choose what tests to run, by providing a REGEX express to match. Examples of using the focus: + + * `ginkgo --focus="g0"` + * `ginkgo --focus="grafana/g0"` + * `ginkgo --focus="addon"` + +* To run with verbose ginkgo logging pass the `--v` +* To run with klog verbosity, pass the `--focus="g0" -- -v=3` where 3 is the log level: 1-3 diff --git a/tests/benchmark/README.md b/tests/benchmark/README.md new file mode 100644 index 000000000..c48955c5c --- /dev/null +++ b/tests/benchmark/README.md @@ -0,0 +1,33 @@ +# Setup metrics collector + +You can use `setup-metrics-collector.sh` to setup metrics collector to simulate multiple clients push metric data to ACM Hub. This script is for testing purposes only. + +## Prereqs + +You must meet the following requirements to setup metrics collector: + +- ACM 2.1+ available +- `MultiClusterObservability` instance available and have following pods in `open-cluster-management-addon-observability` namespace: + + ``` + $ oc get po -n open-cluster-management-addon-observability + NAME READY STATUS RESTARTS AGE + endpoint-observability-operator-7f8f949bc8-trwzh 2/2 Running 0 118m + metrics-collector-deployment-74cbf5896f-jhg6v 1/1 Running 0 111m + ``` + +## Setup metrics collector + +Use `setup-metrics-collector.sh` to setup metrics collector, you just need provide a number, then this script will create metrics collector in a different namespace. + +``` +./setup-metrics-collector.sh 10 +``` + +## Clean metrics collector + +Use `clean-metrics-collector.sh` to remove all metrics collector you created. + +``` +./clean-metrics-collector.sh 10 +``` \ No newline at end of file diff --git a/tests/benchmark/clean-metrics-collector.sh b/tests/benchmark/clean-metrics-collector.sh new file mode 100755 index 000000000..79833ed37 --- /dev/null +++ b/tests/benchmark/clean-metrics-collector.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +sed_command='sed -i' +managed_cluster='managed' +if [ $# -eq 2 ]; then + managed_cluster=$2 +fi + +if [ $# -lt 1 ]; then + echo "this script must be run with the number of clusters:" + echo -e "\n$0 total_clusters\n" + exit 1 +fi + +re='^[0-9]+$' +if ! 
[[ $1 =~ $re ]] ; then + echo "error: arguments <$1> not a number" >&2; exit 1 +fi + +for i in $(seq 1 $1) +do + cluster_name=simulate-${managed_cluster}-cluster${i} + kubectl delete deploy -n ${cluster_name} metrics-collector-deployment + kubectl delete clusterrolebinding ${cluster_name}-clusters-metrics-collector-view + kubectl delete -n ${cluster_name} secret/observability-managed-cluster-certs + kubectl delete ns ${cluster_name} +done diff --git a/tests/benchmark/metrics-collector-view.yaml b/tests/benchmark/metrics-collector-view.yaml new file mode 100644 index 000000000..c28cf7314 --- /dev/null +++ b/tests/benchmark/metrics-collector-view.yaml @@ -0,0 +1,14 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __CLUSTER_NAME__-clusters-metrics-collector-view + annotations: + owner: multicluster-operator +subjects: + - kind: ServiceAccount + name: endpoint-observability-operator-sa + namespace: __CLUSTER_NAME__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-monitoring-view \ No newline at end of file diff --git a/tests/benchmark/setup-metrics-collector.sh b/tests/benchmark/setup-metrics-collector.sh new file mode 100755 index 000000000..5fcae62dd --- /dev/null +++ b/tests/benchmark/setup-metrics-collector.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +WORKDIR="$(pwd -P)" +export PATH=${PATH}:${WORKDIR} + +if ! command -v jq &> /dev/null; then + if [[ "$(uname)" == "Linux" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + elif [[ "$(uname)" == "Darwin" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 + fi + chmod +x ./jq +fi + +sed_command='sed -i' +if [[ "$(uname)" == "Darwin" ]]; then + sed_command='sed -i -e' +fi + +managed_cluster='managed' +if [ $# -eq 2 ]; then + managed_cluster=$2 +fi + +if [ $# -lt 1 ]; then + echo "this script must be run with the number of clusters:" + echo -e "\n$0 total_clusters\n" + exit 1 +fi + +re='^[0-9]+$' +if ! 
[[ $1 =~ $re ]] ; then + echo "error: arguments <$1> not a number" >&2; exit 1 +fi + +for i in $(seq 1 $1) +do + cluster_name=simulate-${managed_cluster}-cluster${i} + kubectl create ns ${cluster_name} + + # create ca/sa/rolebinding for metrics collector + kubectl get configmap metrics-collector-serving-certs-ca-bundle -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get secret observability-managed-cluster-certs -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get sa endpoint-observability-operator-sa -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl -n ${cluster_name} patch secret observability-managed-cluster-certs --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + kubectl -n ${cluster_name} patch sa endpoint-observability-operator-sa --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + + # deploy metrics collector deployment to cluster ns + deploy_yaml_file=${cluster_name}-metrics-collector-deployment.yaml + kubectl get deploy metrics-collector-deployment -n open-cluster-management-addon-observability -o yaml > $deploy_yaml_file + $sed_command "s~cluster=.*$~cluster=${cluster_name}\"~g" "$deploy_yaml_file" + $sed_command "s~clusterID=.*$~clusterID=$(cat /proc/sys/kernel/random/uuid)\"~g" "$deploy_yaml_file" + $sed_command "s~namespace:\ open-cluster-management-addon-observability~namespace:\ ${cluster_name}~g" "$deploy_yaml_file" + cat "$deploy_yaml_file" | kubectl -n ${cluster_name} apply -f - + rm -rf "$deploy_yaml_file" + kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' + + + # deploy ClusterRoleBinding for read metrics from OCP prometheus + rolebinding_yaml_file=${cluster_name}-metrics-collector-view.yaml + cp -rf metrics-collector-view.yaml "$rolebinding_yaml_file" + $sed_command "s~__CLUSTER_NAME__~${cluster_name}~g" "$rolebinding_yaml_file" + cat "$rolebinding_yaml_file" | kubectl -n ${cluster_name} apply -f - + rm -rf "$rolebinding_yaml_file" + +done diff --git a/tests/format-results.sh b/tests/format-results.sh new file mode 100755 index 000000000..45249b2d3 --- /dev/null +++ b/tests/format-results.sh @@ -0,0 +1,15 @@ +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +# Format the result to add Observability label for BeforeSuite and AfterSuite +# Log a requirement in ginkgo - https://github.com/onsi/ginkgo/issues/795 + +#!/bin/bash + +if [ -z $1 ]; then + echo "Please provide the results file." 
+ exit 1 +fi + +sed -i "s~BeforeSuite~Observability: [P1][Sev1][Observability] Cannot enable observability service successfully~g" $1 +sed -i "s~AfterSuite~Observability: [P1][Sev1][Observability] Cannot uninstall observability service completely~g" $1 diff --git a/tests/grafana-dev-test.sh b/tests/grafana-dev-test.sh new file mode 100755 index 000000000..2b2abffef --- /dev/null +++ b/tests/grafana-dev-test.sh @@ -0,0 +1,86 @@ +#!/usr/bin/env bash + +# avoid client-side throttling due to HOME=/ +export HOME=/tmp + +base_dir="$(cd "$(dirname "$0")/.." ; pwd -P)" +cd "$base_dir" +obs_namespace=open-cluster-management-observability + +# create a dashboard for test export grafana dashboard +kubectl apply -n "$obs_namespace" -f "$base_dir"/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml + +# test deploy grafana-dev +cd $base_dir/tools +./setup-grafana-dev.sh --deploy +if [ $? -ne 0 ]; then + echo "Failed run setup-grafana-dev.sh --deploy" + exit 1 +fi + +n=0 +until [ "$n" -ge 30 ] +do + kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev | grep "2/2" | grep "Running" && break + n=$((n+1)) + echo "Retrying in 10s for waiting for grafana-dev pod ready ..." + sleep 10 +done + +if [ $n -eq 30 ]; then + echo "Failed waiting for grafana-dev pod ready in 300s" + exit 1 +fi + +podName=$(kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') +if [ $? -ne 0 ] || [ -z "$podName" ]; then + echo "Failed to get grafana pod name, please check your grafana-dev deployment" + exit 1 +fi + +sleep 10 +# create a new test user to test +kubectl -n "$obs_namespace" exec -it "$podName" -c grafana-dashboard-loader -- /usr/bin/curl -XPOST -H "Content-Type: application/json" -H "X-Forwarded-User: WHAT_YOU_ARE_DOING_IS_VOIDING_SUPPORT_0000000000000000000000000000000000000000000000000000000000000000" -d '{ "name":"test", "email":"test", "login":"test", "password":"test" }' '127.0.0.1:3001/api/admin/users' +sleep 30 + +n=0 +until [ "$n" -ge 10 ] +do + # test swith user to grafana admin + ./switch-to-grafana-admin.sh test + if [ $? -eq 0 ]; then + break + fi + n=$((n+1)) + sleep 5 +done +if [ $n -eq 10 ]; then + echo "Failed run switch-to-grafana-admin.sh test" + exit 1 +fi + +n=0 +until [ "$n" -ge 10 ] +do + # test export grafana dashboard + ./generate-dashboard-configmap-yaml.sh "Sample Dashboard for E2E" + if [ $? -eq 0 ]; then + break + fi + n=$((n+1)) + sleep 5 +done +if [ $n -eq 10 ]; then + echo "Failed run generate-dashboard-configmap-yaml.sh" + exit 1 +fi + +# test clean grafan-dev +./setup-grafana-dev.sh --clean +if [ $? -ne 0 ]; then + echo "Failed run setup-grafana-dev.sh --clean" + exit 1 +fi + +# clean test env +kubectl delete -n "$obs_namespace" -f "$base_dir"/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml diff --git a/tests/pkg/kustomize/render.go b/tests/pkg/kustomize/render.go new file mode 100644 index 000000000..4ed09eecb --- /dev/null +++ b/tests/pkg/kustomize/render.go @@ -0,0 +1,34 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package kustomize + +import ( + "sigs.k8s.io/kustomize/api/filesys" + "sigs.k8s.io/kustomize/api/krusty" + "sigs.k8s.io/yaml" +) + +// Options ... 
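+// KustomizationPath is the directory containing the kustomization.yaml that Render builds.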
+type Options struct { + KustomizationPath string + OutputPath string +} + +// Render is used to render the kustomization +func Render(o Options) ([]byte, error) { + fSys := filesys.MakeFsOnDisk() + k := krusty.MakeKustomizer(krusty.MakeDefaultOptions()) + m, err := k.Run(fSys, o.KustomizationPath) + if err != nil { + return nil, err + } + return m.AsYaml() +} + +// GetLabels return labels +func GetLabels(yamlB []byte) (interface{}, error) { + data := map[string]interface{}{} + err := yaml.Unmarshal(yamlB, &data) + return data["metadata"].(map[string]interface{})["labels"], err +} diff --git a/tests/pkg/kustomize/render_test.go b/tests/pkg/kustomize/render_test.go new file mode 100644 index 000000000..94d643005 --- /dev/null +++ b/tests/pkg/kustomize/render_test.go @@ -0,0 +1,67 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package kustomize + +import ( + "bytes" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" +) + +func TestRender(t *testing.T) { + // Test + buf, err := Render(Options{ + KustomizationPath: "tests", + }) + require.NoError(t, err, "Render()") + rendered := rendered(t, buf) + names := containedNames(rendered) + assert.Equal(t, []string{"thanos-ruler-custom-rules"}, names, "rendered names") + + labels, _ := GetLabels(buf) + for labelName := range labels.(map[string]interface{}) { + assert.Equal(t, "alertname", labelName, "metadata label") + } + + str := string(buf) + pkgLabelPos := strings.Index(str, "alertname: NodeOutOfMemory\n") + assert.True(t, pkgLabelPos > 0, "alertname: NodeOutOfMemory label should be contained") + +} + +func containedNames(rendered []map[string]interface{}) (names []string) { + for _, o := range rendered { + m := o["metadata"] + name := "" + if mm, ok := m.(map[string]interface{}); ok { + name = mm["name"].(string) + } else { + name = m.(map[interface{}]interface{})["name"].(string) + } + names = append(names, name) + } + return +} + +func rendered(t *testing.T, rendered []byte) (r []map[string]interface{}) { + dec := yaml.NewDecoder(bytes.NewReader(rendered)) + o := map[string]interface{}{} + var err error + for ; err == nil; err = dec.Decode(o) { + require.NoError(t, err) + if len(o) > 0 { + r = append(r, o) + o = map[string]interface{}{} + } + } + if err != io.EOF { + require.NoError(t, err) + } + return +} diff --git a/tests/pkg/kustomize/tests/kustomization.yaml b/tests/pkg/kustomize/tests/kustomization.yaml new file mode 100644 index 000000000..880b1cf50 --- /dev/null +++ b/tests/pkg/kustomize/tests/kustomization.yaml @@ -0,0 +1,4 @@ +commonLabels: + alertname: NodeOutOfMemory +resources: +- thanos-ruler-custom-rules-valid.yaml diff --git a/tests/pkg/kustomize/tests/thanos-ruler-custom-rules-valid.yaml b/tests/pkg/kustomize/tests/thanos-ruler-custom-rules-valid.yaml new file mode 100644 index 000000000..7d334bddb --- /dev/null +++ b/tests/pkg/kustomize/tests/thanos-ruler-custom-rules-valid.yaml @@ -0,0 +1,17 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: thanos-ruler-custom-rules +data: + custom_rules.yaml: | + groups: + - name: node-health + rules: + - alert: NodeOutOfMemory + expr: instance:node_memory_utilisation:ratio * 100 > 0 + for: 1m + labels: + instance: "{{ $labels.instance }}" + cluster: "{{ $labels.cluster }}" + clusterID: "{{ $labels.clusterID }}" + severity: warning diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list new file mode 100644 
index 000000000..67bf12350 --- /dev/null +++ b/tests/pkg/testdata/ignored-metric-list @@ -0,0 +1,79 @@ +authenticated_user_requests +authentication_attempts +assisted_installer_cluster_creations +assisted_installer_cluster_installation_started +assisted_installer_cluster_installation_second +assisted_installer_cluster_host_installation_count +assisted_installer_host_installation_phase_seconds +assisted_installer_cluster_host_disk_sync_duration_ms +assisted_installer_cluster_host_image_pull_status +assisted_installer_filesystem_usage_percentage +cluster:capacity_cpu_cores:sum +cluster:capacity_memory_bytes:sum +cluster:container_cpu_usage:ratio +cluster:container_spec_cpu_shares:ratio +cluster:cpu_usage_cores:sum +cluster:memory_usage:ratio +cluster:memory_usage_bytes:sum +cluster:usage:resources:sum +cluster_infrastructure_provider +cluster_version +cluster_version_payload +container_spec_cpu_quota +coredns_dns_request_count_total +coredns_dns_request_duration_seconds_sum +coredns_dns_request_type_count_total +coredns_dns_response_rcode_count_total +etcd_mvcc_db_total_size_in_bytes +etcd_debugging_snap_save_total_duration_seconds_sum +etcd_disk_backend_commit_duration_seconds_sum +etcd_disk_wal_fsync_duration_seconds_sum +etcd_object_counts +etcd_server_client_requests_total +etcd_server_health_failures +etcd_server_quota_backend_bytes +haproxy_backend_connection_errors_total +haproxy_backend_connections_total +haproxy_backend_current_queue +haproxy_backend_http_average_response_latency_milliseconds +haproxy_backend_max_sessions +haproxy_backend_response_errors_total +haproxy_backend_up +http_requests_total +instance:node_filesystem_usage:sum +kube_daemonset_status_desired_number_scheduled +kube_daemonset_status_number_unavailable +kube_node_spec_unschedulable +kube_node_status_allocatable_cpu_cores +kube_node_status_allocatable_memory_bytes +kube_node_status_capacity +kube_node_status_capacity_pods +kube_node_status_capacity_cpu_cores +kube_node_status_condition +kube_pod_container_resource_limits_cpu_cores +kube_pod_container_resource_limits_memory_bytes +kube_pod_container_resource_requests_cpu_cores +kube_pod_container_resource_requests_memory_bytes +kubelet_running_container_count +kubelet_runtime_operations +kubelet_runtime_operations_latency_microseconds +kubelet_volume_stats_available_bytes +kubelet_volume_stats_capacity_bytes +kube_persistentvolume_status_phase +mixin_pod_workload +namespace:kube_pod_container_resource_requests_cpu_cores:sum +namespace:kube_pod_container_resource_requests_memory_bytes:sum +namespace:container_memory_usage_bytes:sum +namespace_cpu:kube_pod_container_resource_requests:sum +node_filesystem_free_bytes +node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate +policyreport_info +cluster_monitoring_operator_reconcile_errors_total +cluster_monitoring_operator_reconcile_attempts_total +cluster_operator_conditions +cluster_operator_up +cluster:policy_governance_info:propagated_count +cluster:policy_governance_info:propagated_noncompliant_count +policy:policy_governance_info:propagated_count +policy:policy_governance_info:propagated_noncompliant_count +namespace_cpu:kube_pod_container_resource_requests:sum diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go new file mode 100644 index 000000000..dbc091299 --- /dev/null +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -0,0 +1,213 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "flag" + "fmt" + "io/ioutil" + "math/rand" + "os" + "testing" + "time" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" + . "github.com/onsi/gomega" + "gopkg.in/yaml.v2" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var ( + testHeadless bool + + BearerToken string + baseDomain string + kubeadminUser string + kubeadminCredential string + kubeconfig string + reportFile string + optionsFile string + ownerPrefix, ocpRelease string + + testOptions utils.TestOptions + testOptionsContainer utils.TestOptionsContainer + testUITimeout time.Duration + + testFailed = false +) + +const ( + OCP_RELEASE_DEFAULT = "4.4.4" + charset = "abcdefghijklmnopqrstuvwxyz" + + "0123456789" + + MCO_CR_NAME = "observability" + MCO_NAMESPACE = "open-cluster-management-observability" + MCO_ADDON_NAMESPACE = "open-cluster-management-addon-observability" + MCO_LABEL = "name=multicluster-observability-operator" + MCO_LABEL_OWNER = "owner=multicluster-observability-operator" + + ALERTMANAGER_LABEL = "app=multicluster-observability-alertmanager" + GRAFANA_LABEL = "app=multicluster-observability-grafana" + OBSERVATORIUM_API_LABEL = "app.kubernetes.io/name=observatorium-api" + RBAC_QUERY_PROXY_LABEL = "app=rbac-query-proxy" + + THANOS_COMPACT_LABEL = "app.kubernetes.io/name=thanos-compact" + THANOS_STORE_LABEL = "app.kubernetes.io/name=thanos-store" + THANOS_RECEIVE_LABEL = "app.kubernetes.io/name=thanos-receive" + THANOS_RULE_LABEL = "app.kubernetes.io/name=thanos-rule" + THANOS_QUERY_LABEL = "app.kubernetes.io/name=thanos-query" + THANOS_QUERY_FRONTEND_LABEL = "app.kubernetes.io/name=thanos-query-frontend" + THANOS_QUERY_FRONTEND_MEMCACHED_LABEL = "app.kubernetes.io/component=query-frontend-cache,app.kubernetes.io/name=memcached" + THANOS_STORE_MEMCACHED_LABEL = "app.kubernetes.io/component=store-cache,app.kubernetes.io/name=memcached" +) + +var seededRand *rand.Rand = rand.New( + rand.NewSource(time.Now().UnixNano())) + +func StringWithCharset(length int, charset string) string { + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +func randString(length int) string { + return StringWithCharset(length, charset) +} + +func init() { + klog.SetOutput(GinkgoWriter) + klog.InitFlags(nil) + + flag.StringVar(&kubeadminUser, "kubeadmin-user", "kubeadmin", "Provide the kubeadmin credential for the cluster under test (e.g. -kubeadmin-user=\"xxxxx\").") + flag.StringVar(&kubeadminCredential, "kubeadmin-credential", "", "Provide the kubeadmin credential for the cluster under test (e.g. -kubeadmin-credential=\"xxxxx-xxxxx-xxxxx-xxxxx\").") + flag.StringVar(&baseDomain, "base-domain", "", "Provide the base domain for the cluster under test (e.g. 
-base-domain=\"demo.red-chesterfield.com\").") + flag.StringVar(&reportFile, "report-file", "results.xml", "Provide the path to where the junit results will be printed.") + flag.StringVar(&kubeconfig, "kubeconfig", "", "Location of the kubeconfig to use; defaults to KUBECONFIG if not set") + flag.StringVar(&optionsFile, "options", "", "Location of an \"options.yaml\" file to provide input for various tests") +} + +func TestObservabilityE2E(t *testing.T) { + RegisterFailHandler(Fail) + junitReporter := reporters.NewJUnitReporter(reportFile) + RunSpecsWithDefaultAndCustomReporters(t, "Observability E2E Suite", []Reporter{junitReporter}) +} + +var _ = BeforeSuite(func() { + initVars() + installMCO() +}) + +var _ = AfterSuite(func() { + if !testFailed { + uninstallMCO() + } else { + utils.PrintAllMCOPodsStatus(testOptions) + } +}) + +func initVars() { + + // default ginkgo test timeout 30s + // increased from original 10s + testUITimeout = time.Second * 30 + + if optionsFile == "" { + optionsFile = os.Getenv("OPTIONS") + if optionsFile == "" { + optionsFile = "resources/options.yaml" + } + } + + klog.V(1).Infof("options filename=%s", optionsFile) + + data, err := ioutil.ReadFile(optionsFile) + if err != nil { + klog.Errorf("--options error: %v", err) + } + Expect(err).NotTo(HaveOccurred()) + + fmt.Printf("file preview: %s \n", string(optionsFile)) + + err = yaml.Unmarshal([]byte(data), &testOptionsContainer) + if err != nil { + klog.Errorf("--options error: %v", err) + } + + testOptions = testOptionsContainer.Options + + // default Headless is `true` + // to disable, set Headless: false + // in options file + if testOptions.Headless == "" { + testHeadless = true + } else { + if testOptions.Headless == "false" { + testHeadless = false + } else { + testHeadless = true + } + } + + // OwnerPrefix is used to help identify who owns deployed resources + // If a value is not supplied, the default is OS environment variable $USER + if testOptions.OwnerPrefix == "" { + ownerPrefix = os.Getenv("USER") + if ownerPrefix == "" { + ownerPrefix = "ginkgo" + } + } else { + ownerPrefix = testOptions.OwnerPrefix + } + klog.V(1).Infof("ownerPrefix=%s", ownerPrefix) + + if testOptions.Connection.OCPRelease == "" { + ocpRelease = OCP_RELEASE_DEFAULT + } else { + ocpRelease = testOptions.Connection.OCPRelease + } + klog.V(1).Infof("ocpRelease=%s", ocpRelease) + + if testOptions.KubeConfig == "" { + if kubeconfig == "" { + kubeconfig = os.Getenv("KUBECONFIG") + } + testOptions.KubeConfig = kubeconfig + } + + if testOptions.HubCluster.BaseDomain != "" { + baseDomain = testOptions.HubCluster.BaseDomain + + if testOptions.HubCluster.ClusterServerURL == "" { + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", testOptions.HubCluster.BaseDomain) + } + } else { + Expect(baseDomain).NotTo(BeEmpty(), "The `baseDomain` is required.") + testOptions.HubCluster.BaseDomain = baseDomain + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", baseDomain) + } + + if testOptions.HubCluster.User != "" { + kubeadminUser = testOptions.HubCluster.User + } + if testOptions.HubCluster.Password != "" { + kubeadminCredential = testOptions.HubCluster.Password + } + + if testOptions.ManagedClusters != nil && len(testOptions.ManagedClusters) > 0 { + for i, mc := range testOptions.ManagedClusters { + if mc.ClusterServerURL == "" { + testOptions.ManagedClusters[i].ClusterServerURL = fmt.Sprintf("https://api.%s:6443", mc.BaseDomain) + } + if mc.KubeConfig == "" { + 
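+				// Fall back to the IMPORT_KUBECONFIG environment variable when the imported cluster does not set a kubeconfig in options.yaml.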
testOptions.ManagedClusters[i].KubeConfig = os.Getenv("IMPORT_KUBECONFIG") + } + } + } +} diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go new file mode 100644 index 000000000..b95f2288e --- /dev/null +++ b/tests/pkg/tests/observability_addon_test.go @@ -0,0 +1,191 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "fmt" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + JustBeforeEach(func() { + Eventually(func() error { + clusters, clusterError = utils.ListManagedClusters(testOptions) + if clusterError != nil { + return clusterError + } + return nil + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + Context("[P2][Sev2][Observability] Verify monitoring operator and deployment status when metrics collection disabled (addon/g0) -", func() { + It("[Stable] Should have resource requirement defined in CR", func() { + By("Check addon resource requirement") + res, err := utils.GetMCOAddonSpecResources(testOptions) + Expect(err).ToNot(HaveOccurred()) + limits := res["limits"].(map[string]interface{}) + requests := res["requests"].(map[string]interface{}) + Expect(limits["cpu"]).To(Equal("200m")) + Expect(limits["memory"]).To(Equal("700Mi")) + Expect(requests["cpu"]).To(Equal("10m")) + Expect(requests["memory"]).To(Equal("100Mi")) + }) + + It("[Stable] Should have resource requirement in metrics-collector", func() { + By("Check metrics-collector resource requirement") + Eventually(func() error { + return utils.CheckMCOAddonResources(testOptions) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[Stable] Should not have the expected MCO addon pods when disable observabilityaddon", func() { + Eventually(func() error { + return utils.ModifyMCOAddonSpecMetrics(testOptions, false) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for MCO addon components scales to 0") + Eventually(func() error { + err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if len(podList.Items) != 0 || err != nil { + return fmt.Errorf("Failed to disable observability addon") + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + Eventually(func() error { + err = utils.CheckAllOBADisabled(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + }) + // it takes Prometheus 5m to notice a metric is not available - https://github.com/prometheus/prometheus/issues/1810 + // the corret way is use timestamp, for example: + // timestamp(node_memory_MemAvailable_bytes{cluster="local-cluster"}) - timestamp(node_memory_MemAvailable_bytes{cluster="local-cluster"} offset 1m) > 59 + It("[Stable] Waiting for check no metric data in grafana console", func() { + Eventually(func() error { + for _, cluster := range clusters { + 
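+					// A "Failed to find metric name from response" error means the query returned
+					// no fresh samples for this cluster, i.e. the collector has stopped pushing.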
err, hasMetric := utils.ContainManagedClusterMetric(testOptions, `timestamp(node_memory_MemAvailable_bytes{cluster="`+cluster+`}) - timestamp(node_memory_MemAvailable_bytes{cluster=`+cluster+`"} offset 1m) > 59`, []string{`"__name__":"node_memory_MemAvailable_bytes"`}) + if err != nil && !hasMetric && strings.Contains(err.Error(), "Failed to find metric name from response") { + return nil + } + } + return fmt.Errorf("Check no metric data in grafana console error: %v", err) + }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[Stable] Modifying MCO cr to enable observabilityaddon", func() { + Eventually(func() error { + return utils.ModifyMCOAddonSpecMetrics(testOptions, true) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for MCO addon components ready") + Eventually(func() bool { + err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if len(podList.Items) == 1 && err == nil { + return true + } + return false + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) + + By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + }) + }) + + It("[P3][Sev3][Observability][Stable] Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope (addon/g0)", func() { + By("Set interval to 14") + Eventually(func() bool { + err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) + if strings.Contains(err.Error(), "Invalid value") && + strings.Contains(err.Error(), "15") { + return true + } + klog.V(1).Infof("error message: <%s>\n", err.Error()) + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + + By("Set interval to 3601") + Eventually(func() bool { + err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(3601)) + if strings.Contains(err.Error(), "Invalid value") && + strings.Contains(err.Error(), "3600") { + return true + } + klog.V(1).Infof("error message: <%s>\n", err.Error()) + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + }) + + Context("[P2][Sev2][Observability] Disable the Observability by updating managed cluster label (addon/g0) -", func() { + It("[Stable] Modifying managedcluster cr to disable observability", func() { + Eventually(func() error { + return utils.UpdateObservabilityFromManagedCluster(testOptions, false) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for MCO addon components scales to 0") + Eventually(func() bool { + err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) + if err == nil && obaNS == nil { + return true + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + + It("[Stable] Remove disable observability label from the managed cluster", func() { + Eventually(func() error { + return utils.UpdateObservabilityFromManagedCluster(testOptions, true) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for MCO addon components ready") + Eventually(func() bool { + err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if 
len(podList.Items) == 1 && err == nil { + return true + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + utils.PrintManagedClusterOBAObject(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go new file mode 100644 index 000000000..b214f874b --- /dev/null +++ b/tests/pkg/tests/observability_alert_test.go @@ -0,0 +1,342 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "sort" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/prometheus/alertmanager/api/v2/models" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + statefulsetLabels := [...]string{ + ALERTMANAGER_LABEL, + THANOS_RULE_LABEL, + } + configmap := [...]string{ + "thanos-ruler-default-rules", + "thanos-ruler-custom-rules", + } + secret := "alertmanager-config" + + It("@BVT - [P1][Sev1][Observability][Stable] Verify alert is created and received - Should have the expected statefulsets (alert/g0)", func() { + By("Checking if STS: Alertmanager and observability-thanos-rule exist") + for _, label := range statefulsetLabels { + sts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{LabelSelector: label}) + Expect(err).NotTo(HaveOccurred()) + for _, stsInfo := range (*sts).Items { + Expect(len(stsInfo.Spec.Template.Spec.Volumes)).Should(BeNumerically(">", 0)) + + if strings.Contains(stsInfo.Name, "-alertmanager") { + By("The statefulset: " + stsInfo.Name + " should have the appropriate secret mounted") + Expect(stsInfo.Spec.Template.Spec.Volumes[0].Secret.SecretName).To(Equal("alertmanager-config")) + } + + if strings.Contains(stsInfo.Name, "-thanos-rule") { + By("The statefulset: " + stsInfo.Name + " should have the appropriate configmap mounted") + Expect(stsInfo.Spec.Template.Spec.Volumes[0].ConfigMap.Name).To(Equal("thanos-ruler-default-rules")) + } + } + } + }) + + It("[P2][Sev2][Observability][Stable] Verify alert is created and received - Should have the expected configmap (alert/g0)", func() { + By("Checking if CM: thanos-ruler-default-rules is existed") + cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(cm.ResourceVersion).ShouldNot(BeEmpty()) + klog.V(3).Infof("Configmap %s does exist", configmap[0]) + }) + + 
It("[P3][Sev3][Observability][Stable] Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules (alert/g0)", func() { + By("Checking if CM: thanos-ruler-custom-rules not existed") + _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) + + if err == nil { + err = fmt.Errorf("%s exist within the namespace env", configmap[1]) + Expect(err).NotTo(HaveOccurred()) + } + + Expect(err).To(HaveOccurred()) + klog.V(3).Infof("Configmap %s does not exist", configmap[1]) + }) + + It("@BVT - [P1][Sev1][Observability][Stable] Verify alert is created and received - Should have the expected secret (alert/g0)", func() { + By("Checking if SECRETS: alertmanager-config is existed") + secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(secret.GetName()).To(Equal("alertmanager-config")) + klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) + }) + + It("@BVT - [P1][Sev1][Observability][Stable] Verify alert is created and received - Should have the alertmanager configured in rule (alert/g0)", func() { + By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") + rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_RULE_LABEL, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(len(rules.Items)).NotTo(Equal(0)) + argList := (*rules).Items[0].Spec.Template.Spec.Containers[0].Args + exists := false + for _, arg := range argList { + if arg == "--alertmanagers.url=http://alertmanager:9093" { + exists = true + break + } + if strings.HasPrefix(arg, "--alertmanagers.config=") { + exists = true + break + } + if strings.HasPrefix(arg, "--alertmanagers.config-file=") { + exists = true + break + } + } + Expect(exists).To(Equal(true)) + klog.V(3).Info("Have the alertmanager url configured in rule") + }) + + It("[P2][Sev2][Observability][Stable] Verify alert is created and received - Should have custom alert generated (alert/g0)", func() { + By("Creating custom alert rules") + + rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_RULE_LABEL, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(len(rules.Items)).NotTo(Equal(0)) + + stsName := (*rules).Items[0].Name + oldSts, _ := utils.GetStatefulSet(testOptions, true, stsName, MCO_NAMESPACE) + + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_valid"}) + Expect(err).NotTo(HaveOccurred()) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + + ThanosRuleRestarting := false + By("Wait for thanos rule pods are restarted and ready") + // ensure the thanos rule pods are restarted successfully before processing + Eventually(func() error { + if !ThanosRuleRestarting { + newSts, _ := utils.GetStatefulSet(testOptions, true, stsName, MCO_NAMESPACE) + if oldSts.GetResourceVersion() == newSts.GetResourceVersion() { + return fmt.Errorf("The %s is not being restarted in 10 minutes", stsName) + } else { + ThanosRuleRestarting = true + } + } + + err = utils.CheckStatefulSetPodReady(testOptions, stsName) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + + var 
labelName, labelValue string + labels, err := kustomize.GetLabels(yamlB) + Expect(err).NotTo(HaveOccurred()) + for labelName = range labels.(map[string]interface{}) { + labelValue = labels.(map[string]interface{})[labelName].(string) + } + + By("Checking alert generated") + Eventually(func() error { + err, _ := utils.ContainManagedClusterMetric(testOptions, `ALERTS{`+labelName+`="`+labelValue+`"}`, + []string{`"__name__":"ALERTS"`, `"` + labelName + `":"` + labelValue + `"`}) + return err + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Should modify the SECRET: alertmanager-config (alert/g0)", func() { + By("Editing the secret, we should be able to add the third partying tools integrations") + secret := utils.CreateCustomAlertConfigYaml(testOptions.HubCluster.BaseDomain) + + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, secret)).NotTo(HaveOccurred()) + klog.V(3).Infof("Successfully modified the secret: alertmanager-config") + }) + + It("[P2][Sev2][Observability][Stable] Updated alert rule can take effect automatically - Should have custom alert updated (alert/g0)", func() { + By("Updating custom alert rules") + + yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_invalid"}) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + + var labelName, labelValue string + labels, _ := kustomize.GetLabels(yamlB) + for labelName = range labels.(map[string]interface{}) { + labelValue = labels.(map[string]interface{})[labelName].(string) + } + + By("Checking alert generated") + Eventually(func() error { + err, _ := utils.ContainManagedClusterMetric(testOptions, `ALERTS{`+labelName+`="`+labelValue+`"}`, + []string{`"__name__":"ALERTS"`, `"` + labelName + `":"` + labelValue + `"`}) + return err + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) + }) + + It("[P2][Sev2][Observability][Stable] Updated alert rule can take effect automatically - delete the customized rules (alert/g0)", func() { + + rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_RULE_LABEL, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(len(rules.Items)).NotTo(Equal(0)) + + stsName := (*rules).Items[0].Name + + oldSts, _ := utils.GetStatefulSet(testOptions, true, stsName, MCO_NAMESPACE) + Eventually(func() error { + err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Delete(context.TODO(), configmap[1], metav1.DeleteOptions{}) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + ThanosRuleRestarting := false + By("Wait for thanos rule pods are restarted and ready") + // ensure the thanos rule pods are restarted successfully before processing + Eventually(func() error { + if !ThanosRuleRestarting { + newSts, _ := utils.GetStatefulSet(testOptions, true, stsName, MCO_NAMESPACE) + if oldSts.GetResourceVersion() == newSts.GetResourceVersion() { + return fmt.Errorf("The %s is not being restarted in 10 minutes", stsName) + } else { + ThanosRuleRestarting = true + } + } + + err = utils.CheckStatefulSetPodReady(testOptions, stsName) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + + 
klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") + }) + + It("[P2][Sev2][Observability][Integration] Should have alert named Watchdog forwarded to alertmanager (alertforward/g0)", func() { + amURL := url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + q := amURL.Query() + q.Set("filter", "alertname=Watchdog") + amURL.RawQuery = q.Encode() + + caCrt, err := utils.GetRouterCA(hubClient) + Expect(err).NotTo(HaveOccurred()) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(caCrt) + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + }, + } + + alertGetReq, err := http.NewRequest("GET", amURL.String(), nil) + Expect(err).NotTo(HaveOccurred()) + + if os.Getenv("IS_KIND_ENV") != "true" { + alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) + } + + expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0") + Expect(err).NotTo(HaveOccurred()) + expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions) + Expect(err).NotTo(HaveOccurred()) + expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) + + By("Checking Watchdog alerts are forwarded to the hub") + Eventually(func() error { + resp, err := client.Do(alertGetReq) + if err != nil { + klog.Errorf("err: %+v\n", err) + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + klog.Errorf("err: %+v\n", resp) + return fmt.Errorf("Failed to get alerts via alertmanager route with http reponse: %v", resp) + } + + alertResult, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + postableAlerts := models.PostableAlerts{} + err = json.Unmarshal(alertResult, &postableAlerts) + if err != nil { + return err + } + + clusterIDsInAlerts := []string{} + for _, alt := range postableAlerts { + if alt.Labels != nil { + labelSets := map[string]string(alt.Labels) + clusterID := labelSets["cluster"] + if clusterID != "" { + clusterIDsInAlerts = append(clusterIDsInAlerts, clusterID) + } + } + } + + sort.Strings(clusterIDsInAlerts) + sort.Strings(expectClusterIdentifiers) + if !reflect.DeepEqual(clusterIDsInAlerts, expectClusterIdentifiers) { + return fmt.Errorf("Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster") + } + + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go new file mode 100644 index 000000000..afa6baf04 --- /dev/null +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -0,0 +1,159 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("[P1][Sev1][Observability][Integration] Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated (certrenew/g0)", func() { + By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") + // sleep 30s to wait for installation is ready + time.Sleep(30 * time.Second) + collectorPodName := "" + hubPodsName := []string{} + Eventually(func() bool { + if collectorPodName == "" { + _, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if podList != nil && len(podList.Items) > 0 { + collectorPodName = podList.Items[0].Name + } + } + if collectorPodName == "" { + return false + } + hubPodsName = []string{} + _, apiPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app.kubernetes.io/name=observatorium-api") + if apiPodList != nil && len(apiPodList.Items) != 0 { + for _, pod := range apiPodList.Items { + hubPodsName = append(hubPodsName, pod.Name) + } + } else { + return false + } + _, rbacPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app=rbac-query-proxy") + if rbacPodList != nil && len(rbacPodList.Items) != 0 { + for _, pod := range rbacPodList.Items { + hubPodsName = append(hubPodsName, pod.Name) + } + } else { + return false + } + + return true + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + + By("Deleting certificate secret to simulate certificate renew") + err := utils.DeleteCertSecret(testOptions) + Expect(err).ToNot(HaveOccurred()) + + By(fmt.Sprintf("Waiting for old pods removed: %v and new pods created", hubPodsName)) + Eventually(func() bool { + err1, appPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app.kubernetes.io/name=observatorium-api") + err2, rbacPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app=rbac-query-proxy") + if err1 == nil && err2 == nil { + if len(hubPodsName) != len(appPodList.Items)+len(rbacPodList.Items) { + klog.V(1).Infof("Wrong number of pods: <%d> observatorium-api pods and <%d> rbac-query-proxy pods", len(appPodList.Items), len(rbacPodList.Items)) + return false + } + for _, oldPodName := range hubPodsName { + for _, pod := range appPodList.Items { + if oldPodName == pod.Name { + klog.V(1).Infof("<%s> not removed yet", oldPodName) + return false + } + if pod.Status.Phase != "Running" { + klog.V(1).Infof("<%s> not in Running status yet", pod.Name) + return false + } + } + for _, pod := range rbacPodList.Items { + if oldPodName == pod.Name { + klog.V(1).Infof("<%s> not removed yet", oldPodName) + return false + } + if pod.Status.Phase != "Running" { + klog.V(1).Infof("<%s> not in Running status yet", pod.Name) + return false + } + } + } + return true + } + + // debug code to check label "cert/time-restarted" + deploys, err := utils.GetDeploymentWithLabel(testOptions, true, OBSERVATORIUM_API_LABEL, MCO_NAMESPACE) + if err == nil { + for _, deployInfo := range (*deploys).Items { + klog.V(1).Infof("labels: <%v>", 
deployInfo.Spec.Template.ObjectMeta.Labels) + } + } + + deploys, err = utils.GetDeploymentWithLabel(testOptions, true, RBAC_QUERY_PROXY_LABEL, MCO_NAMESPACE) + if err == nil { + for _, deployInfo := range (*deploys).Items { + klog.V(1).Infof("labels: <%v>", deployInfo.Spec.Template.ObjectMeta.Labels) + } + } + + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + + By(fmt.Sprintf("Waiting for old pod <%s> removed and new pod created", collectorPodName)) + Eventually(func() bool { + err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if err == nil { + for _, pod := range podList.Items { + if pod.Name != collectorPodName { + if pod.Status.Phase != "Running" { + klog.V(1).Infof("<%s> not in Running status yet", pod.Name) + return false + } + return true + } + } + + } + // debug code to check label "cert/time-restarted" + deployment, err := utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + if err == nil { + klog.V(1).Infof("labels: <%v>", deployment.Spec.Template.ObjectMeta.Labels) + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go new file mode 100644 index 000000000..01d0e284e --- /dev/null +++ b/tests/pkg/tests/observability_config_test.go @@ -0,0 +1,236 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "fmt" + "os" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("@BVT - [P1][Sev1][Observability][Stable] Verify metrics data global setting on the managed cluster (config/g0)", func() { + if os.Getenv("SKIP_INSTALL_STEP") == "true" { + Skip("Skip the case due to MCO CR was created customized") + } + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + panic(err.Error()) + } + observabilityAddonSpec := mcoRes.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{}) + Expect(observabilityAddonSpec["enableMetrics"]).To(Equal(true)) + Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) + }) + + It("@BVT - [P1][Sev1][Observability][Stable] Verify MCO CR storage class and PVC (config/g0)", func() { + if os.Getenv("SKIP_INSTALL_STEP") == "true" { + Skip("Skip the case due to MCO CR was created customized") + } + mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + spec := mcoSC.Object["spec"].(map[string]interface{}) + scInCR := spec["storageConfig"].(map[string]interface{})["storageClass"].(string) + + scList, _ := hubClient.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{}) + scMatch := false + defaultSC := "" + for _, sc := range scList.Items { + if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" { + defaultSC = sc.Name + } + if sc.Name == scInCR { + scMatch = true + } + } + expectedSC := defaultSC + if scMatch { + expectedSC = scInCR + } + + Eventually(func() error { + pvcList, err := hubClient.CoreV1().PersistentVolumeClaims(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + for _, pvc := range pvcList.Items { + //for KinD cluster, we use minio as object storage. the size is 1Gi. 
+ if pvc.GetName() != "minio" { + scName := *pvc.Spec.StorageClassName + statusPhase := pvc.Status.Phase + if scName != expectedSC || statusPhase != "Bound" { + return fmt.Errorf("PVC check failed, scName = %s, expectedSC = %s, statusPhase = %s", scName, expectedSC, statusPhase) + } + } + } + return nil + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + componentMap := map[string]struct { + // deployment or statefulset + Type string + Label string + }{ + "alertmanager": { + Type: "Statefulset", + Label: ALERTMANAGER_LABEL, + }, + "grafana": { + Type: "Deployment", + Label: GRAFANA_LABEL, + }, + "observatoriumAPI": { + Type: "Deployment", + Label: OBSERVATORIUM_API_LABEL, + }, + "rbacQueryProxy": { + Type: "Deployment", + Label: RBAC_QUERY_PROXY_LABEL, + }, + "compact": { + Type: "Statefulset", + Label: THANOS_COMPACT_LABEL, + }, + "query": { + Type: "Deployment", + Label: THANOS_QUERY_LABEL, + }, + "queryFrontend": { + Type: "Deployment", + Label: THANOS_QUERY_FRONTEND_LABEL, + }, + "queryFrontendMemcached": { + Type: "Statefulset", + Label: THANOS_QUERY_FRONTEND_MEMCACHED_LABEL, + }, + "receive": { + Type: "Statefulset", + Label: THANOS_RECEIVE_LABEL, + }, + "rule": { + Type: "Statefulset", + Label: THANOS_RULE_LABEL, + }, + "storeMemcached": { + Type: "Statefulset", + Label: THANOS_STORE_MEMCACHED_LABEL, + }, + "store": { + Type: "Statefulset", + Label: THANOS_STORE_LABEL, + }, + } + + It("@BVT - [P1][Sev1][Observability][Integration] Verify the replica in advanced config for Observability components (config/g0)", func() { + + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + panic(err.Error()) + } + + spec := mcoRes.Object["spec"].(map[string]interface{}) + if _, adv := spec["advanced"]; !adv { + Skip("Skip the case since the MCO CR did not have advanced spec configed") + } + + advancedSpec := mcoRes.Object["spec"].(map[string]interface{})["advanced"].(map[string]interface{}) + + for key, component := range componentMap { + if key == "compact" || key == "store" { + continue + } + klog.V(1).Infof("The component is: %s\n", key) + replicas := advancedSpec[key].(map[string]interface{})["replicas"] + if component.Type == "Deployment" { + deploys, err := utils.GetDeploymentWithLabel(testOptions, true, component.Label, MCO_NAMESPACE) + Expect(err).NotTo(HaveOccurred()) + for _, deployInfo := range (*deploys).Items { + Expect(int(replicas.(int64))).To(Equal(int(*deployInfo.Spec.Replicas))) + } + } else { + sts, err := utils.GetStatefulSetWithLabel(testOptions, true, component.Label, MCO_NAMESPACE) + Expect(err).NotTo(HaveOccurred()) + for _, stsInfo := range (*sts).Items { + Expect(int(replicas.(int64))).To(Equal(int(*stsInfo.Spec.Replicas))) + } + } + } + }) + + It("[P2][Sev2][Observability][Integration] Persist advance values in MCO CR - Checking resources in advanced config (config/g0)", func() { + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + panic(err.Error()) + } + + spec := mcoRes.Object["spec"].(map[string]interface{}) + if _, adv := spec["advanced"]; !adv { + Skip("Skip the case since the MCO CR did not have advanced spec configed") + } + + advancedSpec := mcoRes.Object["spec"].(map[string]interface{})["advanced"].(map[string]interface{}) + + for key, component := range componentMap { + klog.V(1).Infof("The component is: %s\n", key) + resources := 
advancedSpec[key].(map[string]interface{})["resources"] + limits := resources.(map[string]interface{})["limits"].(map[string]interface{}) + var cpu string + switch v := limits["cpu"].(type) { + case int64: + cpu = fmt.Sprint(v) + default: + cpu = limits["cpu"].(string) + } + if component.Type == "Deployment" { + deploys, err := utils.GetDeploymentWithLabel(testOptions, true, component.Label, MCO_NAMESPACE) + Expect(err).NotTo(HaveOccurred()) + for _, deployInfo := range (*deploys).Items { + Expect(cpu).To(Equal(deployInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())) + Expect(limits["memory"]).To(Equal(deployInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())) + } + } else { + sts, err := utils.GetStatefulSetWithLabel(testOptions, true, component.Label, MCO_NAMESPACE) + Expect(err).NotTo(HaveOccurred()) + for _, stsInfo := range (*sts).Items { + Expect(cpu).To(Equal(stsInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())) + Expect(limits["memory"]).To(Equal(stsInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())) + } + } + } + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go new file mode 100644 index 000000000..380961a40 --- /dev/null +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -0,0 +1,79 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +const ( + dashboardName = "sample-dashboard" + dashboardTitle = "Sample Dashboard for E2E" + updateDashboardTitle = "Update Sample Dashboard for E2E" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("[P2][Sev2][Observability][Stable] Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap (dashboard/g0)", func() { + By("Creating custom dashboard configmap") + yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + Eventually(func() bool { + _, result := utils.ContainDashboard(testOptions, dashboardTitle) + return result + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + + It("[P2][Sev2][Observability][Stable] Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated (dashboard/g0)", func() { + By("Updating custom dashboard configmap") + yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + Eventually(func() bool { + _, result := utils.ContainDashboard(testOptions, dashboardTitle) + return result + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeFalse()) + Eventually(func() bool { + _, result := utils.ContainDashboard(testOptions, updateDashboardTitle) + return result + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + + It("[P2][Sev2][Observability][Stable] Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed (dashboard/g0)", func() { + By("Deleting custom dashboard configmap") + err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + _, result := utils.ContainDashboard(testOptions, updateDashboardTitle) + return result + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeFalse()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go new file mode 100644 index 000000000..bc7bfc9ef --- /dev/null +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -0,0 +1,164 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + appv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + Context("[P2][Sev2][Observability] Verify metrics collector is prevent to be configured manually (endpoint_preserve/g0) -", func() { + newDep := &appv1.Deployment{} + It("[Stable] Deleting metrics-collector deployment", func() { + var ( + err error + dep *appv1.Deployment + ) + Eventually(func() error { + dep, err = utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + Eventually(func() error { + err = utils.DeleteDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + Eventually(func() bool { + newDep, err = utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + if err == nil { + if dep.ObjectMeta.ResourceVersion != newDep.ObjectMeta.ResourceVersion { + return true + } + } + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + }) + It("[Stable] Updating metrics-collector deployment", func() { + updateSaName := "test-serviceaccount" + Eventually(func() error { + newDep, err = utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + if err != nil { + return err + } + newDep.Spec.Template.Spec.ServiceAccountName = updateSaName + newDep, err = utils.UpdateDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE, newDep) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + Eventually(func() bool { + revertDep, err := utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + if err == nil { + if revertDep.ObjectMeta.ResourceVersion != newDep.ObjectMeta.ResourceVersion && + revertDep.Spec.Template.Spec.ServiceAccountName != updateSaName { + return true + } + } + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + }) + }) + + It("[P2][Sev2][Observability][Stable] Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding (endpoint_preserve/g0)", func() { + if os.Getenv("IS_KIND_ENV") == "true" { + Skip("Skip the case due to run in KinD") + } + + By("Deleting metrics-collector-view clusterolebinding") + err, crb := utils.GetCRB(testOptions, false, "metrics-collector-view") + Expect(err).ToNot(HaveOccurred()) + err = utils.DeleteCRB(testOptions, false, "metrics-collector-view") + Expect(err).ToNot(HaveOccurred()) + newCrb := &rbacv1.ClusterRoleBinding{} + Eventually(func() bool { + err, newCrb = utils.GetCRB(testOptions, false, "metrics-collector-view") + if err == nil { + if crb.ObjectMeta.ResourceVersion != 
newCrb.ObjectMeta.ResourceVersion { + return true + } + } + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + + By("Updating metrics-collector-view clusterolebinding") + updateSubName := "test-subject" + newCrb.Subjects[0].Name = updateSubName + err, _ = utils.UpdateCRB(testOptions, false, "metrics-collector-view", newCrb) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + err, revertCrb := utils.GetCRB(testOptions, false, "metrics-collector-view") + if err == nil { + if revertCrb.ObjectMeta.ResourceVersion != newCrb.ObjectMeta.ResourceVersion && + revertCrb.Subjects[0].Name != updateSubName { + return true + } + } + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + }) + + It("[P2][Sev2][Observability][Stable] Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted (endpoint_preserve/g0)", func() { + if os.Getenv("IS_KIND_ENV") == "true" { + Skip("Skip the case due to run in KinD") + } + + By("Deleting metrics-collector-serving-certs-ca-bundle configmap") + var ( + err error + cm *v1.ConfigMap + ) + Eventually(func() error { + err, cm = utils.GetConfigMap(testOptions, false, "metrics-collector-serving-certs-ca-bundle", MCO_ADDON_NAMESPACE) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + Eventually(func() error { + err = utils.DeleteConfigMap(testOptions, false, "metrics-collector-serving-certs-ca-bundle", MCO_ADDON_NAMESPACE) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + newCm := &v1.ConfigMap{} + Eventually(func() bool { + err, newCm = utils.GetConfigMap(testOptions, false, "metrics-collector-serving-certs-ca-bundle", MCO_ADDON_NAMESPACE) + if err == nil { + if cm.ObjectMeta.ResourceVersion != newCm.ObjectMeta.ResourceVersion { + return true + } + } + return false + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go new file mode 100644 index 000000000..5562e423f --- /dev/null +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -0,0 +1,42 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "bytes" + "os/exec" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + + // Do not need to run this case in canary environment + // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist + It("[P1][Sev1][Observability][Integration] Setup a Grafana develop instance (grafana_dev/g0)", func() { + cmd := exec.Command("../../grafana-dev-test.sh") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + klog.V(1).Infof("the output of grafana-dev-test.sh: %v", out.String()) + Expect(err).NotTo(HaveOccurred()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go new file mode 100644 index 000000000..a8f136ceb --- /dev/null +++ b/tests/pkg/tests/observability_grafana_test.go @@ -0,0 +1,57 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("@BVT - [P1][Sev1][Observability][Stable] Verify Grafana - Should have metric data in grafana console (grafana/g0)", func() { + Eventually(func() error { + clusters, err := utils.ListManagedClusters(testOptions) + if err != nil { + return err + } + for _, cluster := range clusters { + query := fmt.Sprintf("node_memory_MemAvailable_bytes{cluster=\"%s\"}", cluster) + err, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{`"__name__":"node_memory_MemAvailable_bytes"`}) + if err != nil { + return err + } + } + return nil + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_install_test.go b/tests/pkg/tests/observability_install_test.go new file mode 100644 index 000000000..238501b28 --- /dev/null +++ b/tests/pkg/tests/observability_install_test.go @@ -0,0 +1,187 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "fmt" + "os" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +func installMCO() { + if os.Getenv("SKIP_INSTALL_STEP") == "true" { + return + } + + hubClient := utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient := utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + By("Checking MCO operator is started up and running") + podList, err := hubClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: MCO_LABEL}) + Expect(len(podList.Items)).To(Equal(1)) + Expect(err).NotTo(HaveOccurred()) + var ( + mcoPod = "" + mcoNs = "" + ) + for _, pod := range podList.Items { + mcoPod = pod.GetName() + mcoNs = pod.GetNamespace() + Expect(string(mcoPod)).NotTo(Equal("")) + Expect(string(pod.Status.Phase)).To(Equal("Running")) + } + + // print mco logs if MCO installation failed + defer func(testOptions utils.TestOptions, isHub bool, namespace, podName, containerName string, previous bool, tailLines int64) { + if testFailed { + mcoLogs, err := utils.GetPodLogs(testOptions, isHub, namespace, podName, containerName, previous, tailLines) + Expect(err).NotTo(HaveOccurred()) + fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed failed, checking MCO operator logs:\n%s\n", mcoLogs) + } else { + fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed successfully!\n") + } + }(testOptions, false, mcoNs, mcoPod, "multicluster-observability-operator", false, 1000) + + By("Checking Required CRDs are created") + Eventually(func() error { + return utils.HaveCRDs(testOptions.HubCluster, testOptions.KubeConfig, + []string{ + "multiclusterobservabilities.observability.open-cluster-management.io", + "observatoria.core.observatorium.io", + "observabilityaddons.observability.open-cluster-management.io", + }) + }).Should(Succeed()) + + Expect(utils.CreateMCONamespace(testOptions)).NotTo(HaveOccurred()) + if os.Getenv("IS_CANARY_ENV") == "true" { + Expect(utils.CreatePullSecret(testOptions, mcoNs)).NotTo(HaveOccurred()) + Expect(utils.CreateObjSecret(testOptions)).NotTo(HaveOccurred()) + } else { + By("Creating Minio as object storage") + //set resource quota and limit range for canary environment to avoid destruct the node + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/minio"}) + Expect(err).NotTo(HaveOccurred()) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + } + + //set resource quota and limit range for canary environment to avoid destruct the node + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/policy"}) + Expect(err).NotTo(HaveOccurred()) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + + By("Creating the MCO testing RBAC resources") + Expect(utils.CreateMCOTestingRBAC(testOptions)).NotTo(HaveOccurred()) + + if os.Getenv("SKIP_INTEGRATION_CASES") != "true" { + By("Creating MCO instance of v1beta1") + v1beta1KustomizationPath := "../../../examples/mco/e2e/v1beta1" + yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: 
v1beta1KustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + + By("Waiting for MCO ready status") + allPodsIsReady := false + Eventually(func() error { + instance, err := dynClient.Resource(utils.NewMCOGVRV1BETA1()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err == nil { + allPodsIsReady = utils.StatusContainsTypeEqualTo(instance, "Ready") + if allPodsIsReady { + testFailed = false + return nil + } + } + testFailed = true + if instance != nil && instance.Object != nil { + return fmt.Errorf("MCO componnets cannot be running in 20 minutes. check the MCO CR status for the details: %v", instance.Object["status"]) + } else { + return fmt.Errorf("Wait for reconciling.") + } + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Check clustermanagementaddon CR is created") + Eventually(func() error { + _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Check the api conversion is working as expected") + v1beta1Tov1beta2GoldenPath := "../../../examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml" + err = utils.CheckMCOConversion(testOptions, v1beta1Tov1beta2GoldenPath) + Expect(err).NotTo(HaveOccurred()) + } + + By("Apply MCO instance of v1beta2") + v1beta2KustomizationPath := "../../../examples/mco/e2e/v1beta2" + yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta2KustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + + // add retry for update mco object failure + Eventually(func() error { + return utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + // wait for pod restarting + time.Sleep(60 * time.Second) + + By("Waiting for MCO ready status") + Eventually(func() error { + err = utils.CheckMCOComponents(testOptions) + if err != nil { + testFailed = true + utils.PrintAllMCOPodsStatus(testOptions) + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*25, EventuallyIntervalSecond*10).Should(Succeed()) + + By("Check endpoint-operator and metrics-collector pods are ready") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*10).Should(Succeed()) + + By("Check clustermanagementaddon CR is created") + Eventually(func() error { + _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + BearerToken, err = utils.FetchBearerToken(testOptions) + if err != nil { + klog.Errorf("fetch bearer token error: %v", err) + } + Expect(BearerToken).NotTo(BeEmpty(), "failed to fetch `BearerToken`") +} diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go new file mode 
100644 index 000000000..7af3d5093 --- /dev/null +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -0,0 +1,111 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "errors" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + Context("[P2][Sev2][Observability][Stable] Should be automatically created within 1 minute when delete manifestwork (manifestwork/g0) -", func() { + manifestWorkName := "endpoint-observability-work" + clientDynamic := utils.GetKubeClientDynamic(testOptions, true) + clusterName := utils.GetManagedClusterName(testOptions) + if clusterName != "" { + oldManifestWorkResourceVersion := "" + oldCollectorPodName := "" + _, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if podList != nil && len(podList.Items) > 0 { + oldCollectorPodName = podList.Items[0].Name + } + + Eventually(func() error { + oldManifestWork, err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()).Namespace(clusterName).Get(context.TODO(), manifestWorkName, metav1.GetOptions{}) + oldManifestWorkResourceVersion = oldManifestWork.GetResourceVersion() + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for manifestwork to be deleted") + Eventually(func() error { + err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()).Namespace(clusterName).Delete(context.TODO(), manifestWorkName, metav1.DeleteOptions{}) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for manifestwork to be created automatically") + Eventually(func() error { + newManifestWork, err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()).Namespace(clusterName).Get(context.TODO(), manifestWorkName, metav1.GetOptions{}) + if err == nil { + if newManifestWork.GetResourceVersion() != oldManifestWorkResourceVersion { + return nil + } else { + return errors.New("No new manifestwork generated") + } + } else { + return err + } + }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) + + It("[Stable] Waiting for metrics collector to be created automatically", func() { + Eventually(func() error { + _, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if podList != nil && len(podList.Items) > 0 { + if oldCollectorPodName != podList.Items[0].Name { + return nil + } + } + return errors.New("No new metrics collector generated") + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[Stable] Checking OBA components are ready", func() { + Eventually(func() error { + err = utils.CheckOBAComponents(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[Stable] Checking metric to ensure that no data is lost in 1 minute", func() { + Eventually(func() error { + err, _ = 
utils.ContainManagedClusterMetric(testOptions, `timestamp(node_memory_MemAvailable_bytes{cluster="`+clusterName+`"}) - timestamp(node_memory_MemAvailable_bytes{cluster="`+clusterName+`"} offset 1m) > 59`, []string{`"__name__":"node_memory_MemAvailable_bytes"`})
+					return err
+				}, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*3).Should(Succeed())
+			})
+		}
+	})
+
+	JustAfterEach(func() {
+		Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred())
+	})
+
+	AfterEach(func() {
+		if CurrentGinkgoTestDescription().Failed {
+			utils.PrintMCOObject(testOptions)
+			utils.PrintAllMCOPodsStatus(testOptions)
+			utils.PrintAllOBAPodsStatus(testOptions)
+		}
+		testFailed = testFailed || CurrentGinkgoTestDescription().Failed
+	})
+})
diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go
new file mode 100644
index 000000000..d9daa8ff6
--- /dev/null
+++ b/tests/pkg/tests/observability_metrics_test.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2021 Red Hat, Inc.
+// Copyright Contributors to the Open Cluster Management project
+
+package tests
+
+import (
+	"context"
+	"fmt"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize"
+	"github.com/stolostron/multicluster-observability-operator/tests/pkg/utils"
+)
+
+const (
+	allowlistCMname = "observability-metrics-custom-allowlist"
+)
+
+var (
+	clusters         []string
+	clusterError     error
+	metricslistError error
+)
+
+var _ = Describe("Observability:", func() {
+	BeforeEach(func() {
+		hubClient = utils.NewKubeClient(
+			testOptions.HubCluster.ClusterServerURL,
+			testOptions.KubeConfig,
+			testOptions.HubCluster.KubeContext)
+
+		dynClient = utils.NewKubeClientDynamic(
+			testOptions.HubCluster.ClusterServerURL,
+			testOptions.KubeConfig,
+			testOptions.HubCluster.KubeContext)
+	})
+
+	JustBeforeEach(func() {
+		Eventually(func() error {
+			clusters, clusterError = utils.ListManagedClusters(testOptions)
+			if clusterError != nil {
+				return clusterError
+			}
+			return nil
+		}, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed())
+	})
+
+	It("[P2][Sev2][Observability][Integration] Customized metrics data are collected (metrics/g0)", func() {
+		By("Adding custom metrics allowlist configmap")
+		yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"})
+		Expect(err).ToNot(HaveOccurred())
+		Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred())
+
+		By("Waiting for newly added metrics to appear on the Grafana console")
+		Eventually(func() error {
+			for _, cluster := range clusters {
+				query := fmt.Sprintf("node_memory_Active_bytes{cluster=\"%s\"} offset 1m", cluster)
+				err, _ := utils.ContainManagedClusterMetric(testOptions, query, []string{`"__name__":"node_memory_Active_bytes"`})
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		}, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed())
+	})
+
+	It("[P2][Sev2][Observability][Integration] Metrics removal from default allowlist (metrics/g0)", func() {
+		By("Waiting for deleted metrics to disappear from the Grafana console")
+		Eventually(func() error {
+			for _, cluster := range clusters {
+				query := fmt.Sprintf("timestamp(instance:node_num_cpu:sum{cluster=\"%s\"}) - timestamp(instance:node_num_cpu:sum{cluster=\"%s\"} offset 1m) > 59",
+					cluster, cluster)
+				metricslistError, _ =
utils.ContainManagedClusterMetric(testOptions, query, []string{}) + if metricslistError == nil { + return nil + } + } + return metricslistError + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) + }) + + It("[P2][Sev2][Observability][Integration] Metrics removal from default allowlist (metrics/g0)", func() { + By("Waiting for deleted metrics disappear on grafana console") + Eventually(func() error { + for _, cluster := range clusters { + query := fmt.Sprintf("timestamp(go_goroutines{cluster=\"%s\"}) - timestamp(go_goroutines{cluster=\"%s\"} offset 1m) > 59", + cluster, cluster) + metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) + if metricslistError == nil { + return nil + } + } + return metricslistError + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) + }) + + It("[P2][Sev2][Observability][Integration] Metrics removal from default allowlist (metrics/g0)", func() { + By("Deleting custom metrics allowlist configmap") + Eventually(func() error { + err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Delete(context.TODO(), allowlistCMname, metav1.DeleteOptions{}) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + By("Waiting for new added metrics disappear on grafana console") + Eventually(func() error { + for _, cluster := range clusters { + query := fmt.Sprintf("timestamp(node_memory_Active_bytes{cluster=\"%s\"}) - timestamp(node_memory_Active_bytes{cluster=\"%s\"} offset 1m) > 59", + cluster, cluster) + metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) + if metricslistError == nil { + return nil + } + } + return metricslistError + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) + }) + + It("[P2][Sev2][Observability][Integration] Should have metrics which used grafana dashboard (ssli/g1)", func() { + metricList := utils.GetDefaultMetricList(testOptions) + ignoreMetricMap := utils.GetIgnoreMetricMap() + for _, name := range metricList { + _, ok := ignoreMetricMap[name] + if !ok { + Eventually(func() error { + err, _ := utils.ContainManagedClusterMetric(testOptions, name, []string{name}) + return err + }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) + } + } + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go new file mode 100644 index 000000000..a726575c1 --- /dev/null +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -0,0 +1,88 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + Context("[P1][Sev1][Observability] Verify Observatorium CR configuration compliance (observatorium_preserve/g0) -", func() { + It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { + oldResourceVersion := "" + updateRetention := "10d" + Eventually(func() error { + cr, err := dynClient.Resource(utils.NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + return err + } + cr.Object["spec"].(map[string]interface{})["thanos"].(map[string]interface{})["compact"].(map[string]interface{})["retentionResolution1h"] = updateRetention + oldResourceVersion = cr.Object["metadata"].(map[string]interface{})["resourceVersion"].(string) + _, err = dynClient.Resource(utils.NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Update(context.TODO(), cr, metav1.UpdateOptions{}) + return err + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) + + Eventually(func() bool { + cr, err := dynClient.Resource(utils.NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err == nil { + replicasNewRetention := cr.Object["spec"].(map[string]interface{})["thanos"].(map[string]interface{})["compact"].(map[string]interface{})["retentionResolution1h"] + newResourceVersion := cr.Object["metadata"].(map[string]interface{})["resourceVersion"].(string) + if newResourceVersion != oldResourceVersion && + replicasNewRetention != updateRetention { + return true + } + } + return false + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*1).Should(BeTrue()) + + // wait for pod restarting + time.Sleep(10 * time.Second) + + By("Wait for thanos compact pods are ready") + sts, err := utils.GetStatefulSetWithLabel(testOptions, true, THANOS_COMPACT_LABEL, MCO_NAMESPACE) + Expect(err).NotTo(HaveOccurred()) + Expect(len(sts.Items)).NotTo(Equal(0)) + // ensure the thanos rule pods are restarted successfully before processing + Eventually(func() error { + err = utils.CheckStatefulSetPodReady(testOptions, (*sts).Items[0].Name) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + }) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go new file mode 100644 index 000000000..e263cb26c --- /dev/null +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -0,0 +1,208 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var ( + EventuallyTimeoutMinute time.Duration = 60 * time.Second + EventuallyIntervalSecond time.Duration = 1 * time.Second + + hubClient kubernetes.Interface + dynClient dynamic.Interface + err error +) + +var _ = Describe("Observability:", func() { + + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR (reconcile/g0)", func() { + By("Modifying MCO CR for reconciling") + err := utils.ModifyMCOCR(testOptions) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for MCO retentionResolutionRaw filed to take effect") + advRetentionCon, err := utils.CheckAdvRetentionConfig(testOptions) + if !advRetentionCon { + Skip("Skip the case since " + err.Error()) + } + Eventually(func() error { + compacts, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_COMPACT_LABEL, + }) + Expect(len(compacts.Items)).NotTo(Equal(0)) + + argList := (*compacts).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--retention.resolution-raw=3d" { + return nil + } + } + return fmt.Errorf("Failed to find modified retention field, the current args is: %v", argList) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Wait for thanos compact pods are ready") + compacts, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_COMPACT_LABEL, + }) + Expect(len(compacts.Items)).NotTo(Equal(0)) + + // ensure the thanos rule pods are restarted successfully before processing + Eventually(func() error { + err = utils.CheckStatefulSetPodReady(testOptions, (*compacts).Items[0].Name) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Wait for alertmanager pods are ready") + alertmans, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: ALERTMANAGER_LABEL, + }) + Expect(len(alertmans.Items)).NotTo(Equal(0)) + + // ensure the thanos rule pods are restarted successfully before processing + Eventually(func() error { + err = utils.CheckStatefulSetPodReady(testOptions, (*alertmans).Items[0].Name) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Verify nodeSelector setting effects for Observability components (reconcile/g0)", func() { + By("Checking node selector spec in MCO CR") + mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + spec := mcoSC.Object["spec"].(map[string]interface{}) + if _, ok := spec["nodeSelector"]; !ok 
{ + Skip("Skip the case since the MCO CR did not set the nodeSelector") + } + + By("Checking node selector for all pods") + Eventually(func() error { + err = utils.CheckAllPodNodeSelector(testOptions, spec["nodeSelector"].(map[string]interface{})) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Check affinity rule takes effect on Observability components (reconcile/g0)", func() { + By("Checking podAntiAffinity for all pods") + Eventually(func() error { + err := utils.CheckAllPodsAffinity(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Customize the Observability components storage size (reconcile/g0)", func() { + By("Resizing alertmanager storage") + alertmans, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: ALERTMANAGER_LABEL, + }) + Expect(len(alertmans.Items)).NotTo(Equal(0)) + + Eventually(func() error { + err := utils.CheckStorageResize(testOptions, (*alertmans).Items[0].Name, "2Gi") + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Revert MCO CR changes (reconcile/g0)", func() { + advRetentionCon, err := utils.CheckAdvRetentionConfig(testOptions) + if !advRetentionCon { + Skip("Skip the case since " + err.Error()) + } + By("Revert MCO CR changes") + err = utils.RevertMCOCRModification(testOptions) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for MCO retentionResolutionRaw filed to take effect") + Eventually(func() error { + compacts, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_COMPACT_LABEL, + }) + Expect(len(compacts.Items)).NotTo(Equal(0)) + + argList := (*compacts).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--retention.resolution-raw=5d" { + return nil + } + } + return fmt.Errorf("Failed to find modified retention field, the current args is: %v", argList) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Wait for thanos compact pods are ready") + // ensure the thanos rule pods are restarted successfully before processing + compacts, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_COMPACT_LABEL, + }) + Expect(len(compacts.Items)).NotTo(Equal(0)) + + Eventually(func() error { + err = utils.CheckStatefulSetPodReady(testOptions, (*compacts).Items[0].Name) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Checking MCO components in default HA mode") + Eventually(func() error { + err = utils.CheckMCOComponents(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*15, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || 
CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go new file mode 100644 index 000000000..917b78395 --- /dev/null +++ b/tests/pkg/tests/observability_retention_test.go @@ -0,0 +1,169 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "fmt" + "math" + "reflect" + "strconv" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + + var ( + deleteDelay = "48h" + retentionInLocal = "24h" + blockDuration = "2h" + ignoreDeletionMarksDelay = "24h" + ) + + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + panic(err.Error()) + } + + if _, adv := mcoRes.Object["spec"].(map[string]interface{})["advanced"]; adv { + if _, rec := mcoRes.Object["spec"].(map[string]interface{})["advanced"].(map[string]interface{})["retentionConfig"]; rec { + for k, v := range mcoRes.Object["spec"].(map[string]interface{})["advanced"].(map[string]interface{})["retentionConfig"].(map[string]interface{}) { + switch k { + case "deleteDelay": + deleteDelay = reflect.ValueOf(v).String() + idmk, _ := strconv.Atoi(deleteDelay[:len(deleteDelay)-1]) + ignoreDeletionMarksDelay = fmt.Sprintf("%.f", math.Ceil(float64(idmk)/float64(2))) + deleteDelay[len(deleteDelay)-1:] + case "retentionInLocal": + retentionInLocal = reflect.ValueOf(v).String() + case "blockDuration": + blockDuration = reflect.ValueOf(v).String() + } + } + } + } + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check compact args (retention/g0):", func() { + By("--delete-delay=" + deleteDelay) + Eventually(func() error { + compacts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_COMPACT_LABEL, + }) + if err != nil { + return err + } + argList := (*compacts).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--delete-delay="+deleteDelay { + return nil + } + } + return fmt.Errorf("Failed to check compact args: --delete-delay="+deleteDelay+". 
args is %v", argList) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check store args (retention/g0):", func() { + By("--ignore-deletion-marks-delay=" + ignoreDeletionMarksDelay) + Eventually(func() error { + stores, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_STORE_LABEL, + }) + if err != nil { + return err + } + argList := (*stores).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--ignore-deletion-marks-delay="+ignoreDeletionMarksDelay { + return nil + } + } + return fmt.Errorf("Failed to check store args: --ignore-deletion-marks-delay="+ignoreDeletionMarksDelay+". The args is: %v", argList) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check receive args (retention/g0):", func() { + By("--tsdb.retention=" + retentionInLocal) + Eventually(func() error { + receives, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_RECEIVE_LABEL, + }) + if err != nil { + return err + } + argList := (*receives).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--tsdb.retention="+retentionInLocal { + return nil + } + } + return fmt.Errorf("Failed to check receive args: --tsdb.retention="+retentionInLocal+". The args is: %v", argList) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check rule args (retention/g0):", func() { + By("--tsdb.retention=" + retentionInLocal) + Eventually(func() error { + rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_RULE_LABEL, + }) + if err != nil { + return err + } + argList := (*rules).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--tsdb.retention="+retentionInLocal { + return nil + } + } + return fmt.Errorf("Failed to check rule args: --tsdb.retention="+retentionInLocal+". The args is: %v", argList) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check rule args (retention/g0):", func() { + By("--tsdb.block-duration=" + blockDuration) + Eventually(func() error { + rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ + LabelSelector: THANOS_RULE_LABEL, + }) + if err != nil { + return err + } + argList := (*rules).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg == "--tsdb.block-duration="+blockDuration { + return nil + } + } + return fmt.Errorf("Failed to check rule args: --tsdb.block-duration="+blockDuration+". 
The args is: %v", argList) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + JustAfterEach(func() { + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go new file mode 100644 index 000000000..3e6863801 --- /dev/null +++ b/tests/pkg/tests/observability_route_test.go @@ -0,0 +1,201 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/klog" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var ( + alertCreated bool = false +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("@BVT - [P1][Sev1][Observability][Integration] Should access metrics via rbac-query-proxy route (route/g0)", func() { + Eventually(func() error { + query := "/api/v1/query?query=cluster_version" + url := "https://rbac-query-proxy-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query + req, err := http.NewRequest( + "GET", + url, + nil) + klog.V(5).Infof("request URL: %s\n", url) + if err != nil { + return err + } + caCrt, err := utils.GetRouterCA(hubClient) + Expect(err).NotTo(HaveOccurred()) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(caCrt) + tr := &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + } + + client := &http.Client{} + if os.Getenv("IS_KIND_ENV") != "true" { + client.Transport = tr + req.Header.Set("Authorization", "Bearer "+BearerToken) + } + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + klog.Errorf("resp: %+v\n", resp) + klog.Errorf("err: %+v\n", err) + return fmt.Errorf("Failed to access metrics via via rbac-query-proxy route") + } + + metricResult, err := ioutil.ReadAll(resp.Body) + klog.V(5).Infof("metricResult: %s\n", metricResult) + if err != nil { + return err + } + + if !strings.Contains(string(metricResult), "cluster_version") { + return fmt.Errorf("Failed to find metric name from response") + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("@BVT - [P1][Sev1][Observability][Integration] Should access alert via alertmanager route (route/g0)", func() { + Eventually(func() error { + query := "/api/v2/alerts" + url := "https://alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain + query + alertJson := ` + [ + { + "annotations":{ + "description":"just for mco e2e testing", + "summary":"an alert that is for mco e2e testing" + }, + "receivers":[ + { + "name":"mco-e2e" + } + ], + "labels":{ + "alertname":"mco-e2e", + "cluster":"testCluster", + "severity":"none" + } + } + ] + ` + alertPostReq, err := http.NewRequest( + "Post", + url, + bytes.NewBuffer([]byte(alertJson))) + alertPostReq.Header.Set("Content-Type", "application/json; charset=UTF-8") + klog.V(5).Infof("request URL: %s\n", url) + if err != nil { + return err + } + + caCrt, err := utils.GetRouterCA(hubClient) + Expect(err).NotTo(HaveOccurred()) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(caCrt) + tr := &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + } + + client := &http.Client{} + if os.Getenv("IS_KIND_ENV") != "true" { + client.Transport = tr + alertPostReq.Header.Set("Authorization", "Bearer "+BearerToken) + } + if !alertCreated { + resp, err := client.Do(alertPostReq) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + klog.Errorf("resp: %+v\n", resp) + klog.Errorf("err: %+v\n", err) + return fmt.Errorf("Failed to create alert via alertmanager route") + } + } + + alertCreated = true + alertGetReq, err := http.NewRequest( + "GET", + url, + nil) + klog.V(5).Infof("request URL: %s\n", url) + + if err != nil { + return err + } + + if os.Getenv("IS_KIND_ENV") != "true" { + alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) + } + + resp, err := client.Do(alertGetReq) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + klog.Errorf("resp: %+v\n", resp) + klog.Errorf("err: %+v\n", err) + return fmt.Errorf("Failed to access alert via alertmanager route") + } + + alertResult, err := ioutil.ReadAll(resp.Body) + klog.V(5).Infof("alertResult: %s\n", alertResult) + if err != nil { + return err + } + + if !strings.Contains(string(alertResult), "mco-e2e") { + return fmt.Errorf("Failed to found alert from alertResult: %s", alertResult) + } + + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/tests/observability_uninstall_test.go b/tests/pkg/tests/observability_uninstall_test.go new file mode 100644 index 000000000..dcf4c4114 --- /dev/null +++ b/tests/pkg/tests/observability_uninstall_test.go @@ -0,0 +1,86 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + "fmt" + "os" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +func uninstallMCO() { + if os.Getenv("SKIP_UNINSTALL_STEP") == "true" { + return + } + + hubClient := utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient := utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + By("Deleteing the MCO testing RBAC resources") + Expect(utils.DeleteMCOTestingRBAC(testOptions)).NotTo(HaveOccurred()) + + By("Uninstall MCO instance") + err := utils.UninstallMCO(testOptions) + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for delete all MCO components") + Eventually(func() error { + var podList, _ = hubClient.CoreV1().Pods(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if len(podList.Items) != 0 { + return err + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for delete MCO addon instance") + Eventually(func() error { + name := MCO_CR_NAME + "-addon" + clientDynamic := utils.GetKubeClientDynamic(testOptions, false) + // should check oba instance from managedcluster + instance, _ := clientDynamic.Resource(utils.NewMCOAddonGVR()).Namespace(MCO_ADDON_NAMESPACE).Get(context.TODO(), name, metav1.GetOptions{}) + if instance != nil { + utils.PrintManagedClusterOBAObject(testOptions) + return fmt.Errorf("Failed to delete MCO addon instance") + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for delete manifestwork") + Eventually(func() error { + name := "endpoint-observability-work" + _, err := dynClient.Resource(utils.NewOCMManifestworksGVR()).Namespace("local-cluster").Get(context.TODO(), name, metav1.GetOptions{}) + return err + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(MatchError(`manifestworks.work.open-cluster-management.io "endpoint-observability-work" not found`)) + + By("Waiting for delete all MCO addon components") + Eventually(func() error { + var podList, _ = hubClient.CoreV1().Pods(MCO_ADDON_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if len(podList.Items) != 0 { + return err + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for delete MCO namespaces") + Eventually(func() error { + err := hubClient.CoreV1().Namespaces().Delete(context.TODO(), MCO_NAMESPACE, metav1.DeleteOptions{}) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) +} diff --git a/tests/pkg/utils/client.go b/tests/pkg/utils/client.go new file mode 100644 index 000000000..322ba71d3 --- /dev/null +++ b/tests/pkg/utils/client.go @@ -0,0 +1,55 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" +) + +func getKubeClient(opt TestOptions, isHub bool) kubernetes.Interface { + clientKube := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + if !isHub && len(opt.ManagedClusters) > 0 { + clientKube = NewKubeClient( + opt.ManagedClusters[0].ClusterServerURL, + opt.ManagedClusters[0].KubeConfig, + opt.ManagedClusters[0].KubeContext) + klog.V(1).Infof("New kubeclient for managedcluster <%v>", opt.ManagedClusters[0].Name) + } + return clientKube +} + +func GetKubeClientDynamic(opt TestOptions, isHub bool) dynamic.Interface { + url := opt.HubCluster.ClusterServerURL + kubeConfig := opt.KubeConfig + kubeContext := opt.HubCluster.KubeContext + if !isHub && len(opt.ManagedClusters) > 0 { + url = opt.ManagedClusters[0].ClusterServerURL + kubeConfig = opt.ManagedClusters[0].KubeConfig + kubeContext = opt.ManagedClusters[0].KubeContext + } + + config, err := LoadConfig(url, kubeConfig, kubeContext) + if err != nil { + panic(err) + } + + clientset, err := dynamic.NewForConfig(config) + if err != nil { + panic(err) + } + + return clientset +} + +func GetManagedClusterName(opt TestOptions) string { + if len(opt.ManagedClusters) > 0 { + return opt.ManagedClusters[0].Name + } + return "" +} diff --git a/tests/pkg/utils/cluster_deploy.go b/tests/pkg/utils/cluster_deploy.go new file mode 100644 index 000000000..b07075686 --- /dev/null +++ b/tests/pkg/utils/cluster_deploy.go @@ -0,0 +1,43 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +// ClusterDeploy defines the data passed to Hive +type ClusterDeploy struct { + Kind string `yaml:"kind"` + APIVersion string `yaml:"apiVersion"` + Items []Items `yaml:"items"` +} + +// Items defines the list of items in the cluster deploy yaml +type Items struct { + Kind string `yaml:"kind"` + Metadata Metadata `yaml:"metadata"` + StringData StringData `yaml:"stringData,omitempty"` + Spec Spec `yaml:"spec,omitempty"` +} + +// Metadata defines the name +type Metadata struct { + Name string `yaml:"name,omitempty"` +} + +// StringData defiines the ssh values +type StringData struct { + Dockerconfigjson string `yaml:".dockerconfigjson,omitempty"` + SSHPrivateKey string `yaml:"ssh-privatekey,omitempty"` +} + +// Spec defines the kube specifications +type Spec struct { + BaseDomain string `yaml:"baseDomain,omitempty"` + ClusterName string `yaml:"clusterName,omitempty"` + Provisioning Provisioning `yaml:"provisioning,omitempty"` +} + +// Provisioning defines the data related to cluster creation +type Provisioning struct { + ReleaseImage string `yaml:"releaseImage,omitempty"` + SSHKnownHosts []string `yaml:"sshKnownHosts,omitempty"` +} diff --git a/tests/pkg/utils/install_config.go b/tests/pkg/utils/install_config.go new file mode 100644 index 000000000..70dad2f64 --- /dev/null +++ b/tests/pkg/utils/install_config.go @@ -0,0 +1,55 @@ +// Copyright (c) 2021 Red Hat, Inc. 
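+//
+// Go types mirroring the OpenShift install-config.yaml consumed by the tests, including
+// the networking settings and the baremetal platform section (bridges, VIPs, and per-host
+// BMC credentials).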
+// Copyright Contributors to the Open Cluster Management project + +package utils + +// InstallConfig definition for install config structure from install-config.yaml +type InstallConfig struct { + BaseDomain string `yaml:"baseDomain,omitempty"` + Networking Networking `yaml:"networking,omitempty"` + Metadata Metadata `yaml:"metadata"` + Platform Platform `yaml:"platform,omitempty"` + PullSecret string `yaml:"pullSecret,omitempty"` + SSHKey string `yaml:"sshKey,omitempty"` +} + +// Networking definition +type Networking struct { + NetworkType string `yaml:"networkType"` + MachineCIDR string `yaml:"machineCIDR"` +} + +// Platform definition +type Platform struct { + Baremetal Baremetal `yaml:"baremetal,omitempty"` +} + +// Baremetal specs for target baremetal provisioning +type Baremetal struct { + ExternalBridge string `yaml:"externalBridge,omitempty"` + ProvisioningBridge string `yaml:"provisioningBridge,omitempty"` + LibvirtURI string `yaml:"libvirtURI,omitempty"` + ProvisioningNetworkInterface string `yaml:"provisioningNetworkInterface,omitempty"` + ProvisioningNetworkCIDR string `yaml:"provisioningNetworkCIDR,omitempty"` + APIVIP string `yaml:"apiVIP,omitempty"` + DNSVIP string `yaml:"dnsVIP,omitempty"` + IngressVIP string `yaml:"ingressVIP,omitempty"` + Hosts []Host `yaml:"hosts,omitempty"` + SSHKnownHosts string `yaml:"sshKnownHosts,omitempty"` +} + +// Host is an array of baremetal assets +type Host struct { + Name string `yaml:"name"` + Role string `yaml:"role"` + Bmc Bmc `yaml:"bmc"` + BootMACAddress string `yaml:"bootMACAddress"` + HardwareProfile string `yaml:"hardwareProfile"` +} + +// Bmc definition +type Bmc struct { + Address string `yaml:"address"` + Username string `yaml:"username"` + Password string `yaml:"password"` +} diff --git a/tests/pkg/utils/mco_cert_secret.go b/tests/pkg/utils/mco_cert_secret.go new file mode 100644 index 000000000..09d22e8b1 --- /dev/null +++ b/tests/pkg/utils/mco_cert_secret.go @@ -0,0 +1,48 @@ +// Copyright (c) 2021 Red Hat, Inc. 
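A brief sketch (hypothetical, not part of this patch) of how the InstallConfig types above could be populated, assuming gopkg.in/yaml.v2, which other files in this patch already import, and an install-config.yaml path supplied by the caller:

package utils

import (
	"io/ioutil"

	"gopkg.in/yaml.v2"
)

// LoadInstallConfigSketch is illustrative only: it unmarshals an
// install-config.yaml file into the InstallConfig struct defined above.
func LoadInstallConfigSketch(path string) (*InstallConfig, error) {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := &InstallConfig{}
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}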
+// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +const ( + ServerCACerts = "observability-server-ca-certs" + ClientCACerts = "observability-client-ca-certs" + ServerCerts = "observability-server-certs" + GrafanaCerts = "observability-grafana-certs" +) + +func DeleteCertSecret(opt TestOptions) error { + clientKube := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + klog.V(1).Infof("Delete certificate secret") + err := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), ServerCACerts, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete certificate secret %s due to %v", ServerCACerts, err) + return err + } + err = clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), ClientCACerts, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete certificate secret %s due to %v", ClientCACerts, err) + return err + } + err = clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), ServerCerts, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete certificate secret %s due to %v", ServerCerts, err) + return err + } + err = clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), GrafanaCerts, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete certificate secret %s due to %v", GrafanaCerts, err) + return err + } + return err +} diff --git a/tests/pkg/utils/mco_clusterrolebinding.go b/tests/pkg/utils/mco_clusterrolebinding.go new file mode 100644 index 000000000..b491c3a63 --- /dev/null +++ b/tests/pkg/utils/mco_clusterrolebinding.go @@ -0,0 +1,57 @@ +// Copyright (c) 2021 Red Hat, Inc. 
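DeleteCertSecret above is usually paired with a wait for the secrets to come back; the sketch below (hypothetical, not part of this patch) polls for the server CA secret and assumes, without this patch guaranteeing it, that the operator recreates the certificates:

package utils

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WaitCertSecretRecreatedSketch is illustrative only: after DeleteCertSecret,
// poll until the server CA secret exists again or the timeout expires.
func WaitCertSecretRecreatedSketch(opt TestOptions, timeout time.Duration) error {
	clientKube := getKubeClient(opt, true)
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		_, err := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), ServerCACerts, metav1.GetOptions{})
		if err == nil {
			return nil
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("secret %s was not recreated within %v", ServerCACerts, timeout)
}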
+// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func GetCRB(opt TestOptions, isHub bool, name string) (error, *rbacv1.ClusterRoleBinding) { + clientKube := getKubeClient(opt, isHub) + crb, err := clientKube.RbacV1().ClusterRoleBindings().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get cluster rolebinding %s due to %v", name, err) + } + return err, crb +} + +func DeleteCRB(opt TestOptions, isHub bool, name string) error { + clientKube := getKubeClient(opt, isHub) + err := clientKube.RbacV1().ClusterRoleBindings().Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete cluster rolebinding %s due to %v", name, err) + } + return err +} + +func UpdateCRB(opt TestOptions, isHub bool, name string, + crb *rbacv1.ClusterRoleBinding) (error, *rbacv1.ClusterRoleBinding) { + clientKube := getKubeClient(opt, isHub) + updateCRB, err := clientKube.RbacV1().ClusterRoleBindings().Update(context.TODO(), crb, metav1.UpdateOptions{}) + if err != nil { + klog.Errorf("Failed to update cluster rolebinding %s due to %v", name, err) + } + return err, updateCRB +} + +func CreateCRB(opt TestOptions, isHub bool, + crb *rbacv1.ClusterRoleBinding) error { + clientKube := getKubeClient(opt, isHub) + _, err := clientKube.RbacV1().ClusterRoleBindings().Create(context.TODO(), crb, metav1.CreateOptions{}) + if err != nil { + if errors.IsAlreadyExists(err) { + klog.V(1).Infof("clusterrolebinding %s already exists, updating...", crb.GetName()) + err, _ := UpdateCRB(opt, isHub, crb.GetName(), crb) + return err + } + klog.Errorf("Failed to create cluster rolebinding %s due to %v", crb.GetName(), err) + return err + } + return nil +} diff --git a/tests/pkg/utils/mco_configmaps.go b/tests/pkg/utils/mco_configmaps.go new file mode 100644 index 000000000..917e572d2 --- /dev/null +++ b/tests/pkg/utils/mco_configmaps.go @@ -0,0 +1,53 @@ +// Copyright (c) 2021 Red Hat, Inc. 
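A short usage sketch (hypothetical, not part of this patch) of the ClusterRoleBinding helpers above; the binding, role, and service account names are made up for illustration, and CreateCRB falls back to UpdateCRB when the binding already exists:

package utils

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ExampleCreateCRB is illustrative only: it builds a hypothetical binding and
// relies on CreateCRB's create-or-update behavior.
func ExampleCreateCRB(opt TestOptions) error {
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "example-observability-crb"},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: []rbacv1.Subject{{
			Kind:      "ServiceAccount",
			Name:      "example-sa",
			Namespace: MCO_NAMESPACE,
		}},
	}
	return CreateCRB(opt, true, crb)
}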
+// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func CreateConfigMap(opt TestOptions, isHub bool, cm *corev1.ConfigMap) error { + clientKube := getKubeClient(opt, isHub) + found, err := clientKube.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(context.TODO(), cm.ObjectMeta.Name, metav1.GetOptions{}) + if err != nil && errors.IsNotFound(err) { + _, err := clientKube.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}) + if err == nil { + klog.V(1).Infof("configmap %s created", cm.ObjectMeta.Name) + } + return err + } + if err != nil { + return err + } + cm.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + _, err = clientKube.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) + if err == nil { + klog.V(1).Infof("configmap %s updated", cm.ObjectMeta.Name) + } + return err +} + +func GetConfigMap(opt TestOptions, isHub bool, name string, + namespace string) (error, *corev1.ConfigMap) { + clientKube := getKubeClient(opt, isHub) + cm, err := clientKube.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get configmap %s in namespace %s due to %v", name, namespace, err) + } + return err, cm +} + +func DeleteConfigMap(opt TestOptions, isHub bool, name string, namespace string) error { + clientKube := getKubeClient(opt, isHub) + err := clientKube.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete configmap %s in namespace %s due to %v", name, namespace, err) + } + return err +} diff --git a/tests/pkg/utils/mco_dashboard.go b/tests/pkg/utils/mco_dashboard.go new file mode 100644 index 000000000..347278c67 --- /dev/null +++ b/tests/pkg/utils/mco_dashboard.go @@ -0,0 +1,70 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + + "k8s.io/klog" +) + +func ContainDashboard(opt TestOptions, title string) (error, bool) { + grafanaConsoleURL := GetGrafanaURL(opt) + path := "/api/search?" 
+ queryParams := url.PathEscape(fmt.Sprintf("query=%s", title)) + req, err := http.NewRequest( + "GET", + grafanaConsoleURL+path+queryParams, + nil) + if err != nil { + return err, false + } + + client := &http.Client{} + if os.Getenv("IS_KIND_ENV") != "true" { + tr := &http.Transport{ + /* #nosec */ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + + client = &http.Client{Transport: tr} + token, err := FetchBearerToken(opt) + if err != nil { + return err, false + } + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + req.Host = opt.HubCluster.GrafanaHost + } + + resp, err := client.Do(req) + if err != nil { + return err, false + } + + if resp.StatusCode != http.StatusOK { + klog.Errorf("resp: %+v\n", resp) + klog.Errorf("err: %+v\n", err) + return fmt.Errorf("failed to access grafana api"), false + } + + result, err := ioutil.ReadAll(resp.Body) + klog.V(1).Infof("result: %s\n", result) + if err != nil { + return err, false + } + + if !strings.Contains(string(result), fmt.Sprintf(`"title":"%s"`, title)) { + return fmt.Errorf("failed to find the dashboard"), false + } else { + return nil, true + } +} diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go new file mode 100644 index 000000000..8ceb768ea --- /dev/null +++ b/tests/pkg/utils/mco_deploy.go @@ -0,0 +1,907 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + b64 "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + "k8s.io/klog" +) + +const ( + MCO_CR_NAME = "observability" + MCO_COMPONENT_LABEL = "observability.open-cluster-management.io/name=" + MCO_CR_NAME + OBSERVATORIUM_COMPONENT_LABEL = "app.kubernetes.io/part-of=observatorium" + MCO_NAMESPACE = "open-cluster-management-observability" + MCO_ADDON_NAMESPACE = "open-cluster-management-addon-observability" + MCO_PULL_SECRET_NAME = "multiclusterhub-operator-pull-secret" + OBJ_SECRET_NAME = "thanos-object-storage" // #nosec + MCO_GROUP = "observability.open-cluster-management.io" + OCM_WORK_GROUP = "work.open-cluster-management.io" + OCM_CLUSTER_GROUP = "cluster.open-cluster-management.io" + OCM_ADDON_GROUP = "addon.open-cluster-management.io" +) + +func NewMCOGVRV1BETA1() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: MCO_GROUP, + Version: "v1beta1", + Resource: "multiclusterobservabilities"} +} + +func NewMCOGVRV1BETA2() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: MCO_GROUP, + Version: "v1beta2", + Resource: "multiclusterobservabilities"} +} + +func NewMCOAddonGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: MCO_GROUP, + Version: "v1beta1", + Resource: "observabilityaddons"} +} + +func NewOCMManifestworksGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: OCM_WORK_GROUP, + Version: "v1", + Resource: "manifestworks"} +} + +func NewOCMManagedClustersGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: OCM_CLUSTER_GROUP, + Version: "v1", + Resource: "managedclusters"} +} + +func NewMCOClusterManagementAddonsGVR() schema.GroupVersionResource { + return 
schema.GroupVersionResource{ + Group: OCM_ADDON_GROUP, + Version: "v1alpha1", + Resource: "clustermanagementaddons"} +} + +func NewMCOManagedClusterAddonsGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: OCM_ADDON_GROUP, + Version: "v1alpha1", + Resource: "managedclusteraddons"} +} + +func NewMCOMObservatoriumGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "core.observatorium.io", + Version: "v1alpha1", + Resource: "observatoria"} +} + +func NewOCMMultiClusterHubGVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "operator.open-cluster-management.io", + Version: "v1", + Resource: "multiclusterhubs"} +} + +func ModifyMCOAvailabilityConfig(opt TestOptions, availabilityConfig string) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return getErr + } + + spec := mco.Object["spec"].(map[string]interface{}) + spec["availabilityConfig"] = availabilityConfig + _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + return nil +} + +func GetAllMCOPods(opt TestOptions) ([]corev1.Pod, error) { + hubClient := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + podList, err := hubClient.CoreV1().Pods(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return []corev1.Pod{}, err + } + + // ignore non-mco pods + mcoPods := []corev1.Pod{} + for _, p := range podList.Items { + if strings.Contains(p.GetName(), "grafana-test") { + continue + } + + if strings.Contains(p.GetName(), "minio") { + continue + } + + mcoPods = append(mcoPods, p) + } + + return mcoPods, nil +} + +func PrintAllMCOPodsStatus(opt TestOptions) { + podList, err := GetAllMCOPods(opt) + if err != nil { + klog.Errorf("Failed to get all MCO pods") + } + + if len(podList) == 0 { + klog.V(1).Infof("Failed to get pod in <%s> namespace", MCO_NAMESPACE) + } + + klog.V(1).Infof("Get <%v> pods in <%s> namespace", len(podList), MCO_NAMESPACE) + for _, pod := range podList { + isReady := false + if pod.Status.Phase == corev1.PodRunning { + isReady = true + break + } + + // only print not ready pod status + if !isReady { + klog.V(1).Infof("Pod <%s> is not on <%s> status due to %#v\n", pod.Name, pod.Status.Phase, pod.Status) + } + } +} + +func PrintMCOObject(opt TestOptions) { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.V(1).Infof("Failed to get mco object") + return + } + + spec, _ := json.MarshalIndent(mco.Object["spec"], "", " ") + status, _ := json.MarshalIndent(mco.Object["status"], "", " ") + klog.V(1).Infof("MCO spec: %+v\n", string(spec)) + klog.V(1).Infof("MCO status: %+v\n", string(status)) +} + +func PrintManagedClusterOBAObject(opt TestOptions) { + clientDynamic := GetKubeClientDynamic(opt, false) + oba, getErr := clientDynamic.Resource(NewMCOAddonGVR()).Namespace(MCO_ADDON_NAMESPACE).Get(context.TODO(), "observability-addon", metav1.GetOptions{}) + if getErr != nil { + klog.V(1).Infof("Failed to get oba object from managedcluster") 
+ return + } + + spec, _ := json.MarshalIndent(oba.Object["spec"], "", " ") + status, _ := json.MarshalIndent(oba.Object["status"], "", " ") + klog.V(1).Infof("OBA spec: %+v\n", string(spec)) + klog.V(1).Infof("OBA status: %+v\n", string(status)) +} + +func GetAllOBAPods(opt TestOptions) ([]corev1.Pod, error) { + clientKube := getKubeClient(opt, false) + obaPods, err := clientKube.CoreV1().Pods(MCO_ADDON_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return []corev1.Pod{}, err + } + + return obaPods.Items, nil +} + +func PrintAllOBAPodsStatus(opt TestOptions) { + podList, err := GetAllOBAPods(opt) + if err != nil { + klog.Errorf("Failed to get all OBA pods") + } + + if len(podList) == 0 { + klog.V(1).Infof("Failed to get pod in <%s> namespace from managedcluster", MCO_ADDON_NAMESPACE) + } + + klog.V(1).Infof("Get <%v> pods in <%s> namespace from managedcluster", len(podList), MCO_ADDON_NAMESPACE) + + for _, pod := range podList { + isReady := false + if pod.Status.Phase == corev1.PodRunning { + isReady = true + break + } + + // only print not ready pod status + if !isReady { + klog.V(1).Infof("Pod <%s> is not on <%s> status due to %#v\n", pod.Name, pod.Status.Phase, pod.Status) + } + } +} + +func CheckAllPodNodeSelector(opt TestOptions, nodeSelector map[string]interface{}) error { + podList, err := GetAllMCOPods(opt) + if err != nil { + return err + } + + for k, v := range nodeSelector { + for _, pod := range podList { + selecterValue, ok := pod.Spec.NodeSelector[k] + if !ok || selecterValue != v { + return fmt.Errorf("failed to check node selector with %s=%s for pod: %v", k, v, pod.GetName()) + } + } + } + + return nil +} + +func CheckAllPodsAffinity(opt TestOptions) error { + podList, err := GetAllMCOPods(opt) + if err != nil { + return err + } + + for _, pod := range podList { + + if pod.Spec.Affinity == nil { + return fmt.Errorf("Failed to check affinity for pod: %v" + pod.GetName()) + } + + weightedPodAffinityTerms := pod.Spec.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution + for _, weightedPodAffinityTerm := range weightedPodAffinityTerms { + topologyKey := weightedPodAffinityTerm.PodAffinityTerm.TopologyKey + if (topologyKey == "kubernetes.io/hostname" && weightedPodAffinityTerm.Weight == 30) || + (topologyKey == "topology.kubernetes.io/zone" && weightedPodAffinityTerm.Weight == 70) { + } else { + return fmt.Errorf("failed to check affinity for pod: %v" + pod.GetName()) + } + } + } + return nil +} + +func CheckStorageResize(opt TestOptions, stsName string, expectedCapacity string) error { + client := getKubeClient(opt, true) + statefulsets := client.AppsV1().StatefulSets(MCO_NAMESPACE) + statefulset, err := statefulsets.Get(context.TODO(), stsName, metav1.GetOptions{}) + if err != nil { + klog.V(1).Infof("Error while retrieving statefulset %s: %s", stsName, err.Error()) + return err + } + vct := statefulset.Spec.VolumeClaimTemplates[0] + if !vct.Spec.Resources.Requests["storage"].Equal(resource.MustParse(expectedCapacity)) { + err = fmt.Errorf("the storage size of statefulset %s should have %s but got %v", + stsName, expectedCapacity, + vct.Spec.Resources.Requests["storage"]) + return err + } + return nil +} + +func CheckOBAComponents(opt TestOptions) error { + client := getKubeClient(opt, false) + deployments := client.AppsV1().Deployments(MCO_ADDON_NAMESPACE) + expectedDeploymentNames := []string{ + "endpoint-observability-operator", + "metrics-collector-deployment", + } + + for _, deploymentName := range expectedDeploymentNames 
{ + deployment, err := deployments.Get(context.TODO(), deploymentName, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Error while retrieving deployment %s: %s", deploymentName, err.Error()) + return err + } + + if deployment.Status.ReadyReplicas != *deployment.Spec.Replicas { + err = fmt.Errorf("deployment %s should have %d but got %d ready replicas", + deploymentName, + *deployment.Spec.Replicas, + deployment.Status.ReadyReplicas) + return err + } + } + + return nil +} + +func CheckMCOComponents(opt TestOptions) error { + client := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + deployments := client.AppsV1().Deployments(MCO_NAMESPACE) + expectedDeploymentLabels := []string{ + "app=multicluster-observability-grafana", + "app.kubernetes.io/name=observatorium-api", + "app.kubernetes.io/name=thanos-query", + "app.kubernetes.io/name=thanos-query-frontend", + "app.kubernetes.io/name=thanos-receive-controller", + "app.kubernetes.io/name=observatorium-operator", + "app=rbac-query-proxy", + } + + for _, deploymentLabel := range expectedDeploymentLabels { + deployList, err := deployments.List(context.TODO(), metav1.ListOptions{ + LabelSelector: deploymentLabel, + }) + + if err != nil { + klog.Errorf("Error while listing deployment with label %s due to: %s", deploymentLabel, err.Error()) + return err + } + + if len((*deployList).Items) == 0 { + return fmt.Errorf("should have deployment created with label %s", deploymentLabel) + } + + for _, deployInfo := range (*deployList).Items { + if deployInfo.Status.ReadyReplicas != *deployInfo.Spec.Replicas { + err = fmt.Errorf("deployment %s should have %d but got %d ready replicas", + deployInfo.Name, + *deployInfo.Spec.Replicas, + deployInfo.Status.ReadyReplicas) + return err + } + } + } + + statefulsets := client.AppsV1().StatefulSets(MCO_NAMESPACE) + expectedStatefulsetLabels := []string{ + "app=multicluster-observability-alertmanager", + "app.kubernetes.io/name=thanos-compact", + "app.kubernetes.io/name=thanos-receive", + "app.kubernetes.io/name=thanos-rule", + "app.kubernetes.io/name=memcached", + "app.kubernetes.io/name=thanos-store", + } + + for _, statefulsetLabel := range expectedStatefulsetLabels { + statefulsetList, err := statefulsets.List(context.TODO(), metav1.ListOptions{ + LabelSelector: statefulsetLabel, + }) + if err != nil { + klog.V(1).Infof("Error while listing deployment with label %s due to: %s", statefulsetLabel, err.Error()) + return err + } + + if len((*statefulsetList).Items) == 0 { + return fmt.Errorf("should have statefulset created with label %s", statefulsetLabel) + } + + for _, statefulsetInfo := range (*statefulsetList).Items { + if statefulsetInfo.Status.ReadyReplicas != *statefulsetInfo.Spec.Replicas { + err = fmt.Errorf("statefulset %s should have %d but got %d ready replicas", + statefulsetInfo.Name, + *statefulsetInfo.Spec.Replicas, + statefulsetInfo.Status.ReadyReplicas) + return err + } + } + } + + return nil +} + +func CheckStatefulSetPodReady(opt TestOptions, stsName string) error { + client := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + statefulsets := client.AppsV1().StatefulSets(MCO_NAMESPACE) + statefulset, err := statefulsets.Get(context.TODO(), stsName, metav1.GetOptions{}) + if err != nil { + klog.V(1).Infof("Error while retrieving statefulset %s: %s", stsName, err.Error()) + return err + } + + if statefulset.Status.ReadyReplicas != *statefulset.Spec.Replicas || + 
statefulset.Status.UpdatedReplicas != *statefulset.Spec.Replicas || + statefulset.Status.UpdateRevision != statefulset.Status.CurrentRevision { + err = fmt.Errorf("statefulset %s should have %d but got %d ready replicas", + stsName, *statefulset.Spec.Replicas, + statefulset.Status.ReadyReplicas) + return err + } + return nil +} + +func CheckDeploymentPodReady(opt TestOptions, deployName string) error { + client := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + deploys := client.AppsV1().Deployments(MCO_NAMESPACE) + deploy, err := deploys.Get(context.TODO(), deployName, metav1.GetOptions{}) + if err != nil { + klog.V(1).Infof("Error while retrieving deployment %s: %s", deployName, err.Error()) + return err + } + + if deploy.Status.ReadyReplicas != *deploy.Spec.Replicas || + deploy.Status.UpdatedReplicas != *deploy.Spec.Replicas || + deploy.Status.AvailableReplicas != *deploy.Spec.Replicas { + err = fmt.Errorf("deployment %s should have %d but got %d ready replicas", + deployName, *deploy.Spec.Replicas, + deploy.Status.ReadyReplicas) + return err + } + return nil +} + +// ModifyMCOCR modifies the MCO CR for reconciling. modify multiple parameter to save running time +func ModifyMCOCR(opt TestOptions) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return getErr + } + spec := mco.Object["spec"].(map[string]interface{}) + storageConfig := spec["storageConfig"].(map[string]interface{}) + storageConfig["alertmanagerStorageSize"] = "2Gi" + + advRetentionCon, _ := CheckAdvRetentionConfig(opt) + if advRetentionCon { + retentionConfig := spec["advanced"].(map[string]interface{})["retentionConfig"].(map[string]interface{}) + retentionConfig["retentionResolutionRaw"] = "3d" + } + + _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + return nil +} + +func CheckAdvRetentionConfig(opt TestOptions) (bool, error) { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return false, getErr + } + + spec := mco.Object["spec"].(map[string]interface{}) + if _, adv := spec["advanced"]; !adv { + return false, fmt.Errorf("the MCO CR did not have advanced spec configed") + } else { + advanced := spec["advanced"].(map[string]interface{}) + if _, rec := advanced["retentionConfig"]; !rec { + return false, fmt.Errorf("the MCO CR did not have advanced retentionConfig spec configed") + } else { + return true, nil + } + } +} + +// RevertMCOCRModification revert the previous changes +func RevertMCOCRModification(opt TestOptions) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return getErr + } + spec := mco.Object["spec"].(map[string]interface{}) + advRetentionCon, _ := CheckAdvRetentionConfig(opt) + if advRetentionCon { + retentionConfig := spec["advanced"].(map[string]interface{})["retentionConfig"].(map[string]interface{}) + 
retentionConfig["retentionResolutionRaw"] = "5d" + } + _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + return nil +} + +func CheckMCOAddon(opt TestOptions) error { + client := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + if len(opt.ManagedClusters) > 0 { + client = NewKubeClient( + opt.ManagedClusters[0].ClusterServerURL, + opt.ManagedClusters[0].KubeConfig, + "") + } + expectedPodNames := []string{ + "endpoint-observability-operator", + "metrics-collector-deployment", + } + podList, err := client.CoreV1().Pods(MCO_ADDON_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + podsn := make(map[string]corev1.PodPhase) + for _, pod := range podList.Items { + podsn[pod.Name] = pod.Status.Phase + } + for _, podName := range expectedPodNames { + exist := false + for key, value := range podsn { + if strings.HasPrefix(key, podName) && value == "Running" { + exist = true + } + } + if !exist { + return fmt.Errorf(podName + " not found") + } + } + return nil +} + +func CheckMCOAddonResources(opt TestOptions) error { + client := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + if len(opt.ManagedClusters) > 0 { + client = NewKubeClient( + opt.ManagedClusters[0].ClusterServerURL, + opt.ManagedClusters[0].KubeConfig, + "") + } + + deployList, err := client.AppsV1().Deployments(MCO_ADDON_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + + resMap := make(map[string]corev1.ResourceRequirements) + for _, deploy := range deployList.Items { + resMap[deploy.Name] = deploy.Spec.Template.Spec.Containers[0].Resources + } + + metricsCollectorRes := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + "cpu": resource.MustParse("200m"), + "memory": resource.MustParse("700Mi"), + }, + Requests: corev1.ResourceList{ + "cpu": resource.MustParse("10m"), + "memory": resource.MustParse("100Mi"), + }, + } + + if !reflect.DeepEqual(resMap["metrics-collector-deployment"], metricsCollectorRes) { + return fmt.Errorf("metrics-collector-deployment resource <%v> is not equal <%v>", + resMap["metrics-collector-deployment"], + metricsCollectorRes) + } + + return nil +} + +func ModifyMCORetentionResolutionRaw(opt TestOptions) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return getErr + } + + spec := mco.Object["spec"].(map[string]interface{}) + advRetentionCon, _ := CheckAdvRetentionConfig(opt) + if advRetentionCon { + retentionConfig := spec["advanced"].(map[string]interface{})["retentionConfig"].(map[string]interface{}) + retentionConfig["retentionResolutionRaw"] = "3d" + } + _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + return nil +} + +func GetMCOAddonSpecMetrics(opt TestOptions) (bool, error) { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return false, getErr + } + + enable := 
mco.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{})["enableMetrics"].(bool) + return enable, nil +} + +func ModifyMCOAddonSpecMetrics(opt TestOptions, enable bool) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return getErr + } + + observabilityAddonSpec := mco.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{}) + observabilityAddonSpec["enableMetrics"] = enable + _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + return nil +} + +func ModifyMCOAddonSpecInterval(opt TestOptions, interval int64) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return getErr + } + + observabilityAddonSpec := mco.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{}) + observabilityAddonSpec["interval"] = interval + _, updateErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + return nil +} + +func GetMCOAddonSpecResources(opt TestOptions) (map[string]interface{}, error) { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + mco, getErr := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + return nil, getErr + } + + spec := mco.Object["spec"].(map[string]interface{}) + if _, addonSpec := spec["observabilityAddonSpec"]; !addonSpec { + return nil, fmt.Errorf("the MCO CR did not have observabilityAddonSpec spec configed") + } + + if _, resSpec := spec["observabilityAddonSpec"].(map[string]interface{})["resources"]; !resSpec { + return nil, fmt.Errorf("the MCO CR did not have observabilityAddonSpec.resources spec configed") + } + + res := spec["observabilityAddonSpec"].(map[string]interface{})["resources"].(map[string]interface{}) + return res, nil +} + +func DeleteMCOInstance(opt TestOptions, name string) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + return clientDynamic.Resource(NewMCOGVRV1BETA2()).Delete(context.TODO(), name, metav1.DeleteOptions{}) +} + +func CheckMCOConversion(opt TestOptions, v1beta1tov1beta2GoldenPath string) error { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + getMCO, err := clientDynamic.Resource(NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + return err + } + + decUnstructured := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + yamlB, err := ioutil.ReadFile(filepath.Clean(v1beta1tov1beta2GoldenPath)) + if err != nil { + return err + } + + expectedMCO := &unstructured.Unstructured{} + _, _, err = decUnstructured.Decode(yamlB, nil, expectedMCO) + if err != nil { + return err + } + + getMCOSpec := getMCO.Object["spec"].(map[string]interface{}) + expectedMCOSpec := 
expectedMCO.Object["spec"].(map[string]interface{}) + + for k, v := range expectedMCOSpec { + val, ok := getMCOSpec[k] + if !ok { + return fmt.Errorf("%s not found in ", k) + } + if !reflect.DeepEqual(val, v) { + return fmt.Errorf("%+v and %+v are not equal", val, v) + } + } + return nil +} + +func CreatePullSecret(opt TestOptions, mcoNs string) error { + clientKube := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + name, err := GetPullSecret(opt) + if err != nil { + return err + } + + pullSecret, errGet := clientKube.CoreV1().Secrets(mcoNs).Get(context.TODO(), name, metav1.GetOptions{}) + if errGet != nil { + return errGet + } + + pullSecret.ObjectMeta = metav1.ObjectMeta{ + Name: name, + Namespace: MCO_NAMESPACE, + } + klog.V(1).Infof("Create MCO pull secret") + _, err = clientKube.CoreV1().Secrets(pullSecret.Namespace).Create(context.TODO(), pullSecret, metav1.CreateOptions{}) + return err +} + +func CreateMCONamespace(opt TestOptions) error { + ns := fmt.Sprintf(`apiVersion: v1 +kind: Namespace +metadata: + name: %s`, + MCO_NAMESPACE) + klog.V(1).Infof("Create %s namespaces", MCO_NAMESPACE) + return Apply( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext, + []byte(ns)) +} + +func CreateObjSecret(opt TestOptions) error { + + bucket := os.Getenv("BUCKET") + if bucket == "" { + return fmt.Errorf("failed to get s3 BUCKET env") + } + + region := os.Getenv("REGION") + if region == "" { + return fmt.Errorf("failed to get s3 REGION env") + } + + accessKey := os.Getenv("AWS_ACCESS_KEY_ID") + if accessKey == "" { + return fmt.Errorf("failed to get aws AWS_ACCESS_KEY_ID env") + } + + secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secretKey == "" { + return fmt.Errorf("failed to get aws AWS_SECRET_ACCESS_KEY env") + } + + objSecret := fmt.Sprintf(`apiVersion: v1 +kind: Secret +metadata: + name: %s + namespace: %s +stringData: + thanos.yaml: | + type: s3 + config: + bucket: %s + endpoint: s3.%s.amazonaws.com + insecure: false + access_key: %s + secret_key: %s +type: Opaque`, + OBJ_SECRET_NAME, + MCO_NAMESPACE, + bucket, + region, + accessKey, + secretKey) + klog.V(1).Infof("Create MCO object storage secret") + return Apply( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext, + []byte(objSecret)) +} + +func UninstallMCO(opt TestOptions) error { + klog.V(1).Infof("Delete MCO instance") + deleteMCOErr := DeleteMCOInstance(opt, MCO_CR_NAME) + if deleteMCOErr != nil { + return deleteMCOErr + } + + clientKube := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + klog.V(1).Infof("Delete MCO object storage secret") + deleteObjSecretErr := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), OBJ_SECRET_NAME, metav1.DeleteOptions{}) + if deleteObjSecretErr != nil { + return deleteObjSecretErr + } + + return nil +} + +func CreateCustomAlertConfigYaml(baseDomain string) []byte { + global := fmt.Sprintf(`global: + resolve_timeout: 5m +route: + receiver: default-receiver + routes: + - match: + alertname: Watchdog + receiver: default-receiver + group_by: ['alertname', 'cluster'] + group_wait: 5s + group_interval: 5s + repeat_interval: 2m +receivers: + - name: default-receiver + slack_configs: + - api_url: https://hooks.slack.com/services/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 + channel: team-observability-test + footer: | + {{ .CommonLabels.cluster }} + mrkdwn_in: + - text + title: '[{{ .Status | toUpper }}] {{ 
.CommonLabels.alertname }} ({{ .CommonLabels.severity }})' + text: |- + {{ range .Alerts }} + *Alerts:* {{ .Annotations.summary }} + *Description:* {{ .Annotations.description }} + *Details:* + {{ range .Labels.SortedPairs }} • *{{ .Name }}:* {{ .Value }} + {{ end }} + {{ end }} + title_link: https://multicloud-console.apps.%s/grafana/explore?orgId=1&left=["now-1h","now","Observatorium",{"expr":"ALERTS{alertname=\"{{ .CommonLabels.alertname }}\"}","context":"explore"},{"mode":"Metrics"},{"ui":[true,true,true,"none"]}] +`, baseDomain) + encodedGlobal := b64.StdEncoding.EncodeToString([]byte(global)) + + instance := fmt.Sprintf(`kind: Secret +apiVersion: v1 +metadata: + name: alertmanager-config + namespace: open-cluster-management-observability +data: + alertmanager.yaml: >- + %s +`, encodedGlobal) + + return []byte(instance) +} diff --git a/tests/pkg/utils/mco_deployments.go b/tests/pkg/utils/mco_deployments.go new file mode 100644 index 000000000..4a8e2e7e3 --- /dev/null +++ b/tests/pkg/utils/mco_deployments.go @@ -0,0 +1,86 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + "errors" + + appv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func GetDeployment(opt TestOptions, isHub bool, name string, + namespace string) (*appv1.Deployment, error) { + clientKube := getKubeClient(opt, isHub) + klog.V(1).Infof("Get deployment <%v> in namespace <%v>, isHub: <%v>", name, namespace, isHub) + dep, err := clientKube.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get deployment %s in namespace %s due to %v", name, namespace, err) + } + return dep, err +} + +func GetDeploymentWithLabel(opt TestOptions, isHub bool, label string, + namespace string) (*appv1.DeploymentList, error) { + clientKube := getKubeClient(opt, isHub) + klog.V(1).Infof("Get get deployment with label selector <%v> in namespace <%v>, isHub: <%v>", label, namespace, isHub) + deps, err := clientKube.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: label, + }) + if err != nil { + klog.Errorf("Failed to get deployment with label selector %s in namespace %s due to %v", label, namespace, err) + } + + return deps, err +} + +func DeleteDeployment(opt TestOptions, isHub bool, name string, namespace string) error { + clientKube := getKubeClient(opt, isHub) + err := clientKube.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete deployment %s in namespace %s due to %v", name, namespace, err) + } + return err +} + +func UpdateDeployment( + opt TestOptions, + isHub bool, + name string, + namespace string, + dep *appv1.Deployment) (*appv1.Deployment, error) { + clientKube := getKubeClient(opt, isHub) + updateDep, err := clientKube.AppsV1().Deployments(namespace).Update(context.TODO(), dep, metav1.UpdateOptions{}) + if err != nil { + klog.Errorf("Failed to update deployment %s in namespace %s due to %v", name, namespace, err) + } + return updateDep, err +} + +func UpdateDeploymentReplicas(opt TestOptions, deployName, crProperty string, desiredReplicas, expectedReplicas int32) error { + clientDynamic := GetKubeClientDynamic(opt, true) + deploy, err := GetDeployment(opt, true, deployName, MCO_NAMESPACE) + if err != nil { + return err + } + deploy.Spec.Replicas = &desiredReplicas + _, err = UpdateDeployment(opt, 
true, deployName, MCO_NAMESPACE, deploy) + if err != nil { + return err + } + + obs, err := clientDynamic.Resource(NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + return err + } + thanos := obs.Object["spec"].(map[string]interface{})["thanos"] + currentReplicas := thanos.(map[string]interface{})[crProperty].(map[string]interface{})["replicas"].(int64) + if int(currentReplicas) != int(expectedReplicas) { + klog.Errorf("Failed to update deployment %s replicas to %v", deployName, expectedReplicas) + return errors.New("the replicas was not updated successfully") + } + return nil +} diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go new file mode 100644 index 000000000..aea3d1d1b --- /dev/null +++ b/tests/pkg/utils/mco_grafana.go @@ -0,0 +1,14 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +func GetGrafanaURL(opt TestOptions) string { + grafanaConsoleURL := "https://multicloud-console.apps." + opt.HubCluster.BaseDomain + "/grafana/" + if opt.HubCluster.GrafanaURL != "" { + grafanaConsoleURL = opt.HubCluster.GrafanaURL + } else { + opt.HubCluster.GrafanaHost = "multicloud-console.apps." + opt.HubCluster.BaseDomain + } + return grafanaConsoleURL +} diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go new file mode 100644 index 000000000..c042ae11f --- /dev/null +++ b/tests/pkg/utils/mco_managedcluster.go @@ -0,0 +1,150 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + "fmt" + + goversion "github.com/hashicorp/go-version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func UpdateObservabilityFromManagedCluster(opt TestOptions, enableObservability bool) error { + clusterName := GetManagedClusterName(opt) + if clusterName != "" { + clientDynamic := GetKubeClientDynamic(opt, true) + cluster, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).Get(context.TODO(), clusterName, metav1.GetOptions{}) + if err != nil { + return err + } + labels, ok := cluster.Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{}) + if !ok { + cluster.Object["metadata"].(map[string]interface{})["labels"] = map[string]interface{}{} + labels = cluster.Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{}) + } + + if !enableObservability { + labels["observability"] = "disabled" + } else { + delete(labels, "observability") + } + _, updateErr := clientDynamic.Resource(NewOCMManagedClustersGVR()).Update(context.TODO(), cluster, metav1.UpdateOptions{}) + if updateErr != nil { + return updateErr + } + } + return nil +} + +func ListManagedClusters(opt TestOptions) ([]string, error) { + clientDynamic := GetKubeClientDynamic(opt, true) + objs, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + clusterNames := []string{} + for _, obj := range objs.Items { + metadata := obj.Object["metadata"].(map[string]interface{}) + name := metadata["name"].(string) + labels := metadata["labels"].(map[string]interface{}) + if labels != nil { + obsControllerStr := "" + if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { + obsControllerStr = obsController.(string) + } + if obsControllerStr != "unreachable" { + clusterNames = 
append(clusterNames, name) + } + } + } + + if len(clusterNames) == 0 { + return clusterNames, fmt.Errorf("no managedcluster found") + } + + return clusterNames, nil +} + +func ListOCPManagedClusterIDs(opt TestOptions, minVersionStr string) ([]string, error) { + minVersion, err := goversion.NewVersion(minVersionStr) + if err != nil { + return nil, err + } + clientDynamic := GetKubeClientDynamic(opt, true) + objs, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + clusterIDs := []string{} + for _, obj := range objs.Items { + metadata := obj.Object["metadata"].(map[string]interface{}) + labels := metadata["labels"].(map[string]interface{}) + if labels != nil { + vendorStr := "" + if vendor, ok := labels["vendor"]; ok { + vendorStr = vendor.(string) + } + obsControllerStr := "" + if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { + obsControllerStr = obsController.(string) + } + if vendorStr == "OpenShift" && obsControllerStr == "available" { + clusterVersionStr := "" + if clusterVersionVal, ok := labels["openshiftVersion"]; ok { + clusterVersionStr = clusterVersionVal.(string) + } + clusterVersion, err := goversion.NewVersion(clusterVersionStr) + if err != nil { + return nil, err + } + if clusterVersion.GreaterThanOrEqual(minVersion) { + clusterIDStr := "" + if clusterID, ok := labels["clusterID"]; ok { + clusterIDStr = clusterID.(string) + } + if len(clusterIDStr) > 0 { + clusterIDs = append(clusterIDs, clusterIDStr) + } + } + } + } + } + + return clusterIDs, nil +} + +func ListKSManagedClusterNames(opt TestOptions) ([]string, error) { + clientDynamic := GetKubeClientDynamic(opt, true) + objs, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + clusterNames := []string{} + for _, obj := range objs.Items { + metadata := obj.Object["metadata"].(map[string]interface{}) + labels := metadata["labels"].(map[string]interface{}) + if labels != nil { + vendorStr := "" + if vendor, ok := labels["vendor"]; ok { + vendorStr = vendor.(string) + } + obsControllerStr := "" + if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { + obsControllerStr = obsController.(string) + } + if vendorStr != "OpenShift" && obsControllerStr != "unreachable" { + clusterNameStr := "" + if clusterNameVal, ok := labels["name"]; ok { + clusterNameStr = clusterNameVal.(string) + } + if len(clusterNameStr) > 0 { + clusterNames = append(clusterNames, clusterNameStr) + } + } + } + } + + return clusterNames, nil +} diff --git a/tests/pkg/utils/mco_metric.go b/tests/pkg/utils/mco_metric.go new file mode 100644 index 000000000..504039aa1 --- /dev/null +++ b/tests/pkg/utils/mco_metric.go @@ -0,0 +1,162 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "regexp" + "strings" + + "gopkg.in/yaml.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func ContainManagedClusterMetric(opt TestOptions, query string, matchedLabels []string) (error, bool) { + grafanaConsoleURL := GetGrafanaURL(opt) + path := "/api/datasources/proxy/1/api/v1/query?" 
+ queryParams := url.PathEscape(fmt.Sprintf("query=%s", query)) + klog.V(5).Infof("request url is: %s\n", grafanaConsoleURL+path+queryParams) + req, err := http.NewRequest( + "GET", + grafanaConsoleURL+path+queryParams, + nil) + if err != nil { + return err, false + } + + client := &http.Client{} + if os.Getenv("IS_KIND_ENV") != "true" { + tr := &http.Transport{ + /* #nosec */ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + + client = &http.Client{Transport: tr} + token, err := FetchBearerToken(opt) + if err != nil { + return err, false + } + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } + req.Host = opt.HubCluster.GrafanaHost + } + + resp, err := client.Do(req) + if err != nil { + return err, false + } + + if resp.StatusCode != http.StatusOK { + klog.Errorf("resp: %+v\n", resp) + klog.Errorf("err: %+v\n", err) + return fmt.Errorf("Failed to access managed cluster metrics via grafana console"), false + } + + metricResult, err := ioutil.ReadAll(resp.Body) + klog.V(5).Infof("metricResult: %s\n", metricResult) + if err != nil { + return err, false + } + + if !strings.Contains(string(metricResult), `"status":"success"`) { + return fmt.Errorf("Failed to find valid status from response"), false + } + + if strings.Contains(string(metricResult), `"result":[]`) { + return fmt.Errorf("Failed to find metric name from response"), false + } + + contained := true + for _, label := range matchedLabels { + if !strings.Contains(string(metricResult), label) { + contained = false + break + } + } + if !contained { + return fmt.Errorf("Failed to find metric name from response"), false + } + + return nil, true +} + +type MetricsAllowlist struct { + NameList []string `yaml:"names"` + MatchList []string `yaml:"matches"` + RenameMap map[string]string `yaml:"renames"` + RuleList []Rule `yaml:"rules"` +} + +// Rule is the struct for recording rules and alert rules +type Rule struct { + Record string `yaml:"record"` + Expr string `yaml:"expr"` +} + +func GetDefaultMetricList(opt TestOptions) []string { + allDefaultMetricName := []string{} + cl := getKubeClient(opt, true) + cm, err := cl.CoreV1().ConfigMaps(MCO_NAMESPACE).Get( + context.TODO(), + "observability-metrics-allowlist", + metav1.GetOptions{}, + ) + if err != nil { + klog.Errorf("Failed to get the configmap <%v>: %+v\n", + "observability-metrics-allowlist", + err) + } + + allowlist := &MetricsAllowlist{} + err = yaml.Unmarshal([]byte(cm.Data["metrics_list.yaml"]), allowlist) + if err != nil { + klog.Errorf("Failed to unmarshal data: %+v\n", err) + } + + allDefaultMetricName = append(allDefaultMetricName, allowlist.NameList...) 
+ + // get the metric name from matches section: + // string: __name__="go_goroutines",job="apiserver" + // want: go_goroutines + re := regexp.MustCompile("__name__=\"(\\w+)\"") + for _, name := range allowlist.MatchList { + result := re.FindStringSubmatch(name) + if len(result) > 1 { + allDefaultMetricName = append(allDefaultMetricName, result[1]) + } + } + + for _, name := range allowlist.RenameMap { + allDefaultMetricName = append(allDefaultMetricName, name) + } + + for _, rule := range allowlist.RuleList { + allDefaultMetricName = append(allDefaultMetricName, rule.Record) + } + return allDefaultMetricName +} + +func GetIgnoreMetricMap() map[string]bool { + txtlines := map[string]bool{} + file, err := os.Open("../testdata/ignored-metric-list") + if err != nil { + klog.Errorf("failed to open the ignored-metric-list file: %+v\n", err) + } + + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + txtlines[scanner.Text()] = true + } + return txtlines +} diff --git a/tests/pkg/utils/mco_namespace.go b/tests/pkg/utils/mco_namespace.go new file mode 100644 index 000000000..5c66aa37b --- /dev/null +++ b/tests/pkg/utils/mco_namespace.go @@ -0,0 +1,27 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func GetNamespace(opt TestOptions, isHub bool, namespace string) (error, *v1.Namespace) { + clientKube := getKubeClient(opt, isHub) + + ns, err := clientKube.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return nil, nil + } + klog.Errorf("Failed to get namespace %s due to %v", namespace, err) + return err, nil + } + return nil, ns +} diff --git a/tests/pkg/utils/mco_oba.go b/tests/pkg/utils/mco_oba.go new file mode 100644 index 000000000..7dafdcb52 --- /dev/null +++ b/tests/pkg/utils/mco_oba.go @@ -0,0 +1,90 @@ +package utils + +import ( + "context" + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +const ( + ManagedClusterAddOnDisabledMessage = "enableMetrics is set to False" + ManagedClusterAddOnEnabledMessage = "Cluster metrics sent successfully" +) + +func CheckOBAStatus(opt TestOptions, namespace, status string) error { + dynClient := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + oba, err := dynClient.Resource(NewMCOAddonGVR()).Namespace(namespace).Get(context.TODO(), "observability-addon", metav1.GetOptions{}) + if err != nil { + return err + } + if oba.Object["status"] != nil && strings.Contains(fmt.Sprint(oba.Object["status"]), status) { + return nil + } else { + return fmt.Errorf("observability-addon is not ready for managed cluster %s", namespace) + } +} + +func CheckManagedClusterAddonsStatus(opt TestOptions, namespace, status string) error { + dynClient := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + mca, err := dynClient.Resource(NewMCOManagedClusterAddonsGVR()).Namespace(namespace).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + if err != nil { + return err + } + if mca.Object["status"] != nil && strings.Contains(fmt.Sprint(mca.Object["status"]), status) { + return nil + } else { + return fmt.Errorf("observability-controller is disabled for managed cluster 
%s", namespace) + } +} + +func CheckAllOBAsEnabled(opt TestOptions) error { + clusters, err := ListManagedClusters(opt) + if err != nil { + return err + } + klog.V(1).Infof("Have the following managedclusters: <%v>", clusters) + + for _, cluster := range clusters { + klog.V(1).Infof("Check OBA status for cluster <%v>", cluster) + err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + if err != nil { + return err + } + + klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) + err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + if err != nil { + return err + } + } + return nil +} + +func CheckAllOBADisabled(opt TestOptions) error { + clusters, err := ListManagedClusters(opt) + if err != nil { + return err + } + for _, cluster := range clusters { + err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnDisabledMessage) + if err != nil { + return err + } + err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnDisabledMessage) + if err != nil { + return err + } + } + return nil +} diff --git a/tests/pkg/utils/mco_pods.go b/tests/pkg/utils/mco_pods.go new file mode 100644 index 000000000..ee3465c27 --- /dev/null +++ b/tests/pkg/utils/mco_pods.go @@ -0,0 +1,64 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "bytes" + "context" + "io" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func GetPodList(opt TestOptions, isHub bool, namespace string, labelSelector string) (error, *v1.PodList) { + clientKube := getKubeClient(opt, isHub) + listOption := metav1.ListOptions{} + if labelSelector != "" { + listOption.LabelSelector = labelSelector + } + podList, err := clientKube.CoreV1().Pods(namespace).List(context.TODO(), listOption) + if err != nil { + klog.Errorf("Failed to get pod list in namespace %s using labelselector %s due to %v", namespace, labelSelector, err) + return err, podList + } + if podList != nil && len(podList.Items) == 0 { + klog.V(1).Infof("No pod found for labelselector %s", labelSelector) + } + return nil, podList +} + +func DeletePod(opt TestOptions, isHub bool, namespace, name string) error { + clientKube := getKubeClient(opt, isHub) + err := clientKube.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete pod %s in namespace %s due to %v", name, namespace, err) + return err + } + return nil +} + +func GetPodLogs(opt TestOptions, isHub bool, namespace, podName, containerName string, previous bool, tailLines int64) (string, error) { + clientKube := getKubeClient(opt, isHub) + podLogOpts := v1.PodLogOptions{ + Container: containerName, + Previous: previous, + TailLines: &tailLines, + } + req := clientKube.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts) + podLogs, err := req.Stream(context.TODO()) + if err != nil { + klog.Errorf("Failed to get logs for %s/%s in namespace %s due to %v", podName, containerName, namespace, err) + return "", err + } + defer podLogs.Close() + buf := new(bytes.Buffer) + _, err = io.Copy(buf, podLogs) + if err != nil { + klog.Errorf("Failed to copy pod logs to buffer due to %v", err) + return "", err + } + return buf.String(), nil +} diff --git a/tests/pkg/utils/mco_router_ca.go b/tests/pkg/utils/mco_router_ca.go new file mode 100644 index 000000000..b5d4ad46f --- /dev/null +++ b/tests/pkg/utils/mco_router_ca.go @@ -0,0 +1,31 @@ +// Copyright (c) 2021 
Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" +) + +const ( + RouterCertsSecretName = "router-certs-default" +) + +func GetRouterCA(cli kubernetes.Interface) ([]byte, error) { + var caCrt []byte + caSecret, err := cli.CoreV1().Secrets("openshift-ingress").Get(context.TODO(), RouterCertsSecretName, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get router certificate secret %s due to %v", RouterCertsSecretName, err) + return caCrt, err + } + caCrt, ok := caSecret.Data["tls.crt"] + if ok { + return caCrt, nil + } + return caCrt, fmt.Errorf("failed to get tls.crt from %s secret", RouterCertsSecretName) +} diff --git a/tests/pkg/utils/mco_sa.go b/tests/pkg/utils/mco_sa.go new file mode 100644 index 000000000..a23fac71e --- /dev/null +++ b/tests/pkg/utils/mco_sa.go @@ -0,0 +1,49 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func DeleteSA(opt TestOptions, isHub bool, namespace string, + name string) error { + clientKube := getKubeClient(opt, isHub) + err := clientKube.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("Failed to delete serviceaccount %s due to %v", name, err) + } + return err +} + +func UpdateSA(opt TestOptions, isHub bool, namespace string, + sa *v1.ServiceAccount) (error, *v1.ServiceAccount) { + clientKube := getKubeClient(opt, isHub) + updateSA, err := clientKube.CoreV1().ServiceAccounts(namespace).Update(context.TODO(), sa, metav1.UpdateOptions{}) + if err != nil { + klog.Errorf("Failed to update serviceaccount %s due to %v", sa.GetName(), err) + } + return err, updateSA +} + +func CreateSA(opt TestOptions, isHub bool, namespace string, + sa *v1.ServiceAccount) error { + clientKube := getKubeClient(opt, isHub) + _, err := clientKube.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) + if err != nil { + if errors.IsAlreadyExists(err) { + klog.V(1).Infof("serviceaccount %s already exists, updating...", sa.GetName()) + err, _ := UpdateSA(opt, isHub, namespace, sa) + return err + } + klog.Errorf("Failed to create serviceaccount %s due to %v", sa.GetName(), err) + return err + } + return nil +} diff --git a/tests/pkg/utils/mco_statefulset.go b/tests/pkg/utils/mco_statefulset.go new file mode 100644 index 000000000..fae1b2b90 --- /dev/null +++ b/tests/pkg/utils/mco_statefulset.go @@ -0,0 +1,35 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + + appv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +func GetStatefulSet(opt TestOptions, isHub bool, name string, + namespace string) (*appv1.StatefulSet, error) { + clientKube := getKubeClient(opt, isHub) + sts, err := clientKube.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get statefulset %s in namespace %s due to %v", name, namespace, err) + } + return sts, err +} + +func GetStatefulSetWithLabel(opt TestOptions, isHub bool, label string, + namespace string) (*appv1.StatefulSetList, error) { + clientKube := getKubeClient(opt, isHub) + sts, err := clientKube.AppsV1().StatefulSets(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: label, + }) + + if err != nil { + klog.Errorf("Failed to get statefulset with label selector %s in namespace %s due to %v", label, namespace, err) + } + return sts, err +} diff --git a/tests/pkg/utils/options.go b/tests/pkg/utils/options.go new file mode 100644 index 000000000..4c7bc8578 --- /dev/null +++ b/tests/pkg/utils/options.go @@ -0,0 +1,81 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +type TestOptionsContainer struct { + Options TestOptions `yaml:"options"` +} + +// Define options available for Tests to consume +type TestOptions struct { + HubCluster Cluster `yaml:"hub"` + ManagedClusters []Cluster `yaml:"clusters"` + ImageRegistry Registry `yaml:"imageRegistry,omitempty"` + KubeConfig string `yaml:"kubeconfig,omitempty"` + Connection CloudConnection `yaml:"cloudConnection,omitempty"` + Headless string `yaml:"headless,omitempty"` + OwnerPrefix string `yaml:"ownerPrefix,omitempty"` +} + +// Define the shape of clusters that may be added under management +type Cluster struct { + Name string `yaml:"name,omitempty"` + Namespace string `yaml:"namespace,omitempty"` + Tags map[string]bool `yaml:"tags,omitempty"` + BaseDomain string `yaml:"baseDomain"` + User string `yaml:"user,omitempty"` + Password string `yaml:"password,omitempty"` + KubeContext string `yaml:"kubecontext,omitempty"` + ClusterServerURL string `yaml:"clusterServerURL,omitempty"` + GrafanaURL string `yaml:"grafanaURL,omitempty"` + GrafanaHost string `yaml:"grafanaHost,omitempty"` + KubeConfig string `yaml:"kubeconfig,omitempty"` +} + +// Define the image registry +type Registry struct { + // example: quay.io/stolostron + Server string `yaml:"server"` + User string `yaml:"user"` + Password string `yaml:"password"` +} + +// CloudConnection struct for bits having to do with Connections +type CloudConnection struct { + PullSecret string `yaml:"pullSecret"` + SSHPrivateKey string `yaml:"sshPrivatekey"` + SSHPublicKey string `yaml:"sshPublickey"` + Keys APIKeys `yaml:"apiKeys,omitempty"` + OCPRelease string `yaml:"ocpRelease,omitempty"` +} + +type APIKeys struct { + AWS AWSAPIKey `yaml:"aws,omitempty"` + GCP GCPAPIKey `yaml:"gcp,omitempty"` + Azure AzureAPIKey `yaml:"azure,omitempty"` +} + +type AWSAPIKey struct { + AWSAccessID string `yaml:"awsAccessKeyID"` + AWSAccessSecret string `yaml:"awsSecretAccessKeyID"` + BaseDnsDomain string `yaml:"baseDnsDomain"` + Region string `yaml:"region"` +} + +type GCPAPIKey struct { + ProjectID string `yaml:"gcpProjectID"` + ServiceAccountJsonKey string `yaml:"gcpServiceAccountJsonKey"` + BaseDnsDomain string `yaml:"baseDnsDomain"` + Region 
string `yaml:"region"` +} + +type AzureAPIKey struct { + BaseDnsDomain string `yaml:"baseDnsDomain"` + BaseDomainRGN string `yaml:"azureBaseDomainRGN"` + Region string `yaml:"region"` + SubscriptionID string `yaml:"subscriptionID"` + TenantID string `yaml:"tenantID"` + ClientID string `yaml:"clientID"` + ClientSecret string `yaml:"clientSecret"` +} diff --git a/tests/pkg/utils/utils.go b/tests/pkg/utils/utils.go new file mode 100644 index 000000000..46f53b1d1 --- /dev/null +++ b/tests/pkg/utils/utils.go @@ -0,0 +1,707 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + + "github.com/ghodss/yaml" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + storagev1 "k8s.io/api/storage/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/version" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +func NewUnversionedRestClient(url, kubeconfig, ctx string) *rest.RESTClient { + klog.V(5).Infof("Create unversionedRestClient for url %s using kubeconfig path %s\n", url, kubeconfig) + config, err := LoadConfig(url, kubeconfig, ctx) + if err != nil { + panic(err) + } + + oldNegotiatedSerializer := config.NegotiatedSerializer + config.NegotiatedSerializer = unstructuredscheme.NewUnstructuredNegotiatedSerializer() + kubeRESTClient, err := rest.UnversionedRESTClientFor(config) + // restore cfg before leaving + defer func(cfg *rest.Config) { cfg.NegotiatedSerializer = oldNegotiatedSerializer }(config) + + if err != nil { + panic(err) + } + + return kubeRESTClient +} + +func NewKubeClient(url, kubeconfig, ctx string) kubernetes.Interface { + klog.V(5).Infof("Create kubeclient for url %s using kubeconfig path %s\n", url, kubeconfig) + config, err := LoadConfig(url, kubeconfig, ctx) + if err != nil { + panic(err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + panic(err) + } + + return clientset +} + +func NewKubeClientDynamic(url, kubeconfig, ctx string) dynamic.Interface { + klog.V(5).Infof("Create kubeclient dynamic for url %s using kubeconfig path %s\n", url, kubeconfig) + config, err := LoadConfig(url, kubeconfig, ctx) + if err != nil { + panic(err) + } + + clientset, err := dynamic.NewForConfig(config) + if err != nil { + panic(err) + } + + return clientset +} + +func NewKubeClientAPIExtension(url, kubeconfig, ctx string) apiextensionsclientset.Interface { + klog.V(5).Infof("Create kubeclient apiextension for url %s using kubeconfig path %s\n", url, kubeconfig) + config, err := LoadConfig(url, kubeconfig, ctx) + if err != nil { + panic(err) + } + + clientset, err := apiextensionsclientset.NewForConfig(config) + if err != nil { + panic(err) + } + + return clientset +} + +// func NewKubeClientDiscovery(url, kubeconfig, ctx string) *discovery.DiscoveryClient { +// klog.V(5).Infof("Create kubeclient discovery for url %s using kubeconfig path %s\n", url, kubeconfig) +// config, err := LoadConfig(url, kubeconfig, ctx) +// 
if err != nil { +// panic(err) +// } + +// clientset, err := discovery.NewDiscoveryClientForConfig(config) +// if err != nil { +// panic(err) +// } + +// return clientset +// } + +func CreateMCOTestingRBAC(opt TestOptions) error { + // create new service account and new clusterrolebinding and bind the serviceaccount to cluster-admin clusterrole + // then the bearer token can be retrieved from the secret of created serviceaccount + mcoTestingCRBName := "mco-e2e-testing-crb" + mcoTestingSAName := "mco-e2e-testing-sa" + mcoTestingCRB := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoTestingCRBName, + Labels: map[string]string{ + "app": "mco-e2e-testing", + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "cluster-admin", + APIGroup: "rbac.authorization.k8s.io", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: mcoTestingSAName, + Namespace: MCO_NAMESPACE, + }, + }, + } + if err := CreateCRB(opt, true, mcoTestingCRB); err != nil { + return fmt.Errorf("failed to create clusterrolebing for %s: %v", mcoTestingCRB.GetName(), err) + } + + mcoTestingSA := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcoTestingSAName, + Namespace: MCO_NAMESPACE, + }, + } + if err := CreateSA(opt, true, MCO_NAMESPACE, mcoTestingSA); err != nil { + return fmt.Errorf("failed to create serviceaccount for %s: %v", mcoTestingSA.GetName(), err) + } + return nil +} + +func DeleteMCOTestingRBAC(opt TestOptions) error { + // delete the created service account and clusterrolebinding + mcoTestingCRBName := "mco-e2e-testing-crb" + mcoTestingSAName := "mco-e2e-testing-sa" + if err := DeleteCRB(opt, true, mcoTestingCRBName); err != nil { + return err + } + if err := DeleteSA(opt, true, MCO_NAMESPACE, mcoTestingSAName); err != nil { + return err + } + return nil +} + +func FetchBearerToken(opt TestOptions) (string, error) { + config, err := LoadConfig( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + if err != nil { + return "", err + } + + if config.BearerToken != "" { + return config.BearerToken, nil + } + + clientKube := NewKubeClient(opt.HubCluster.ClusterServerURL, opt.KubeConfig, opt.HubCluster.KubeContext) + secretList, err := clientKube.CoreV1().Secrets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{FieldSelector: "type=kubernetes.io/service-account-token"}) + if err != nil { + return "", err + } + for _, secret := range secretList.Items { + if secret.GetObjectMeta() != nil && len(secret.GetObjectMeta().GetAnnotations()) > 0 { + annos := secret.GetObjectMeta().GetAnnotations() + sa, saExists := annos["kubernetes.io/service-account.name"] + _, createByExists := annos["kubernetes.io/created-by"] + if saExists && !createByExists && sa == "mco-e2e-testing-sa" { + data := secret.Data + if token, ok := data["token"]; ok { + return string(token), nil + } + } + } + } + return "", fmt.Errorf("failed to get bearer token") +} + +func LoadConfig(url, kubeconfig, ctx string) (*rest.Config, error) { + if kubeconfig == "" { + kubeconfig = os.Getenv("KUBECONFIG") + } + klog.V(5).Infof("Kubeconfig path %s\n", kubeconfig) + // If we have an explicit indication of where the kubernetes config lives, read that. 
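+	// Note: the context name (ctx) is only honored when an explicit kubeconfig path is used; the in-cluster and $HOME/.kube/config fallbacks below ignore it.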
+ if kubeconfig != "" { + if ctx == "" { + // klog.V(5).Infof("clientcmd.BuildConfigFromFlags with %s and %s", url, kubeconfig) + return clientcmd.BuildConfigFromFlags(url, kubeconfig) + } else { + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, + &clientcmd.ConfigOverrides{ + CurrentContext: ctx, + }).ClientConfig() + } + } + // If not, try the in-cluster config. + if c, err := rest.InClusterConfig(); err == nil { + return c, nil + } + // If no in-cluster config, try the default location in the user's home directory. + if usr, err := user.Current(); err == nil { + klog.V(5).Infof("clientcmd.BuildConfigFromFlags for url %s using %s\n", url, filepath.Join(usr.HomeDir, ".kube", "config")) + if c, err := clientcmd.BuildConfigFromFlags(url, filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { + return c, nil + } + } + + return nil, fmt.Errorf("could not create a valid kubeconfig") +} + +//Apply a multi resources file to the cluster described by the url, kubeconfig and ctx. +//url of the cluster +//kubeconfig which contains the ctx +//ctx, the ctx to use +//yamlB, a byte array containing the resources file +func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { + yamls := strings.Split(string(yamlB), "---") + // yamlFiles is an []string + for _, f := range yamls { + if len(strings.TrimSpace(f)) == 0 { + continue + } + + obj := &unstructured.Unstructured{} + klog.V(5).Infof("obj:%v\n", obj.Object) + err := yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + + var kind string + if v, ok := obj.Object["kind"]; !ok { + return fmt.Errorf("kind attribute not found in %s", f) + } else { + kind = v.(string) + } + + klog.V(5).Infof("kind: %s\n", kind) + + var apiVersion string + if v, ok := obj.Object["apiVersion"]; !ok { + return fmt.Errorf("apiVersion attribute not found in %s", f) + } else { + apiVersion = v.(string) + } + klog.V(5).Infof("apiVersion: %s\n", apiVersion) + + clientKube := NewKubeClient(url, kubeconfig, ctx) + clientAPIExtension := NewKubeClientAPIExtension(url, kubeconfig, ctx) + // now use switch over the type of the object + // and match each type-case + switch kind { + case "CustomResourceDefinition": + klog.V(5).Infof("Install CRD: %s\n", f) + obj := &apiextensionsv1.CustomResourceDefinition{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientAPIExtension.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientAPIExtension.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + existingObject.Spec = obj.Spec + klog.Warningf("CRD %s already exists, updating!", existingObject.Name) + _, err = clientAPIExtension.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), existingObject, metav1.UpdateOptions{}) + } + case "Namespace": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.Namespace{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().Namespaces().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s already exists, updating!", obj.Kind, obj.Name) + _, err = 
clientKube.CoreV1().Namespaces().Update(context.TODO(), existingObject, metav1.UpdateOptions{}) + } + case "ServiceAccount": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.ServiceAccount{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().ServiceAccounts(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().ServiceAccounts(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().ServiceAccounts(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "ClusterRoleBinding": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &rbacv1.ClusterRoleBinding{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.RbacV1().ClusterRoleBindings().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.RbacV1().ClusterRoleBindings().Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.RbacV1().ClusterRoleBindings().Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "Secret": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.Secret{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().Secrets(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().Secrets(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().Secrets(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "ConfigMap": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.ConfigMap{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().ConfigMaps(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().ConfigMaps(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().ConfigMaps(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "Service": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.Service{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().Services(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().Services(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().Services(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "PersistentVolumeClaim": + 
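+		// Same create-or-update pattern as the other kinds in this switch: create the object if it does not exist, otherwise keep the existing metadata and update it from the file content.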
klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.PersistentVolumeClaim{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "Deployment": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &appsv1.Deployment{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.AppsV1().Deployments(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.AppsV1().Deployments(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.AppsV1().Deployments(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "LimitRange": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.LimitRange{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().LimitRanges(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().LimitRanges(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().LimitRanges(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "ResourceQuota": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &corev1.ResourceQuota{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.CoreV1().ResourceQuotas(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.CoreV1().ResourceQuotas(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.CoreV1().ResourceQuotas(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + case "StorageClass": + klog.V(5).Infof("Install %s: %s\n", kind, f) + obj := &storagev1.StorageClass{} + err = yaml.Unmarshal([]byte(f), obj) + if err != nil { + return err + } + existingObject, errGet := clientKube.StorageV1().StorageClasses().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + if errGet != nil { + _, err = clientKube.StorageV1().StorageClasses().Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.ObjectMeta = existingObject.ObjectMeta + klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) + _, err = clientKube.StorageV1().StorageClasses().Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + default: + switch kind { + case "MultiClusterObservability": + klog.V(5).Infof("Install 
MultiClusterObservability: %s\n", f) + default: + return fmt.Errorf("resource %s not supported", kind) + } + + gvr := NewMCOGVRV1BETA2() + if apiVersion == "observability.open-cluster-management.io/v1beta1" { + gvr = NewMCOGVRV1BETA1() + } + + // url string, kubeconfig string, ctx string + opt := TestOptions{ + HubCluster: Cluster{ + ClusterServerURL: url, + KubeContext: ctx, + }, + KubeConfig: kubeconfig, + } + clientDynamic := NewKubeClientDynamic(url, kubeconfig, ctx) + if ns := obj.GetNamespace(); ns != "" { + existingObject, errGet := clientDynamic.Resource(gvr).Namespace(ns).Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) + if errGet != nil { + if ips, err := GetPullSecret(opt); err == nil { + obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips + } + _, err = clientDynamic.Resource(gvr).Namespace(ns).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.Object["metadata"] = existingObject.Object["metadata"] + klog.Warningf("%s %s/%s already exists, updating!", obj.GetKind(), obj.GetNamespace(), obj.GetName()) + _, err = clientDynamic.Resource(gvr).Namespace(ns).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + } else { + existingObject, errGet := clientDynamic.Resource(gvr).Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) + if errGet != nil { + if ips, err := GetPullSecret(opt); err == nil { + obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips + } + _, err = clientDynamic.Resource(gvr).Create(context.TODO(), obj, metav1.CreateOptions{}) + } else { + obj.Object["metadata"] = existingObject.Object["metadata"] + klog.Warningf("%s %s already exists, updating!", obj.GetKind(), obj.GetName()) + _, err = clientDynamic.Resource(gvr).Update(context.TODO(), obj, metav1.UpdateOptions{}) + } + } + } + + if err != nil { + return err + } + } + return nil +} + +//StatusContainsTypeEqualTo check if u contains a condition type with value typeString +func StatusContainsTypeEqualTo(u *unstructured.Unstructured, typeString string) bool { + if u != nil { + if v, ok := u.Object["status"]; ok { + status := v.(map[string]interface{}) + if v, ok := status["conditions"]; ok { + conditions := v.([]interface{}) + for _, v := range conditions { + condition := v.(map[string]interface{}) + if v, ok := condition["type"]; ok { + if v.(string) == typeString { + return true + } + } + } + } + } + } + return false +} + +//GetCluster returns the first cluster with a given tag +func GetCluster(tag string, clusters []Cluster) *Cluster { + for _, cluster := range clusters { + if tag, ok := cluster.Tags[tag]; ok { + if tag { + return &cluster + } + } + } + return nil +} + +//GetClusters returns all clusters with a given tag +func GetClusters(tag string, clusters []Cluster) []*Cluster { + filteredClusters := make([]*Cluster, 0) + for i, cluster := range clusters { + if tag, ok := cluster.Tags[tag]; ok { + if tag { + filteredClusters = append(filteredClusters, &clusters[i]) + } + } + } + return filteredClusters +} + +func HaveServerResources(c Cluster, kubeconfig string, expectedAPIGroups []string) error { + clientAPIExtension := NewKubeClientAPIExtension(c.ClusterServerURL, kubeconfig, c.KubeContext) + clientDiscovery := clientAPIExtension.Discovery() + for _, apiGroup := range expectedAPIGroups { + klog.V(1).Infof("Check if %s exists", apiGroup) + _, err := clientDiscovery.ServerResourcesForGroupVersion(apiGroup) + if err != nil { + klog.V(1).Infof("Error while retrieving server resource %s: %s", apiGroup, err.Error()) + return err + } + } + return nil 
+} + +func HaveCRDs(c Cluster, kubeconfig string, expectedCRDs []string) error { + clientAPIExtension := NewKubeClientAPIExtension(c.ClusterServerURL, kubeconfig, c.KubeContext) + clientAPIExtensionV1 := clientAPIExtension.ApiextensionsV1() + for _, crd := range expectedCRDs { + klog.V(1).Infof("Check if %s exists", crd) + _, err := clientAPIExtensionV1.CustomResourceDefinitions().Get(context.TODO(), crd, metav1.GetOptions{}) + if err != nil { + klog.V(1).Infof("Error while retrieving crd %s: %s", crd, err.Error()) + return err + } + } + return nil +} + +func HaveDeploymentsInNamespace(c Cluster, kubeconfig string, namespace string, expectedDeploymentNames []string) error { + + client := NewKubeClient(c.ClusterServerURL, kubeconfig, c.KubeContext) + versionInfo, err := client.Discovery().ServerVersion() + if err != nil { + return err + } + klog.V(1).Infof("Server version info: %v", versionInfo) + + deployments := client.AppsV1().Deployments(namespace) + + for _, deploymentName := range expectedDeploymentNames { + klog.V(1).Infof("Check if deployment %s exists", deploymentName) + deployment, err := deployments.Get(context.TODO(), deploymentName, metav1.GetOptions{}) + if err != nil { + klog.V(1).Infof("Error while retrieving deployment %s: %s", deploymentName, err.Error()) + return err + } + + if deployment.Status.Replicas != deployment.Status.ReadyReplicas { + err = fmt.Errorf("%s: Expect %d but got %d Ready replicas", + deploymentName, + deployment.Status.Replicas, + deployment.Status.ReadyReplicas) + klog.Errorln(err) + return err + } + + for _, condition := range deployment.Status.Conditions { + if condition.Reason == "MinimumReplicasAvailable" { + if condition.Status != corev1.ConditionTrue { + err = fmt.Errorf("%s: Expect %s but got %s", + deploymentName, + condition.Status, + corev1.ConditionTrue) + klog.Errorln(err) + return err + } + } + } + } + + return nil +} + +func GetKubeVersion(client *rest.RESTClient) version.Info { + kubeVersion := version.Info{} + + versionBody, err := client.Get().AbsPath("/version").Do(context.TODO()).Raw() + if err != nil { + klog.Errorf("fail to GET /version with %v", err) + return version.Info{} + } + + err = json.Unmarshal(versionBody, &kubeVersion) + if err != nil { + klog.Errorf("fail to Unmarshal, got '%s': %v", string(versionBody), err) + return version.Info{} + } + + return kubeVersion +} + +func IsOpenshift(client *rest.RESTClient) bool { + //check whether the cluster is openshift or not for openshift version 3.11 and before + _, err := client.Get().AbsPath("/version/openshift").Do(context.TODO()).Raw() + if err == nil { + klog.V(5).Info("Found openshift version from /version/openshift") + return true + } + + //check whether the cluster is openshift or not for openshift version 4.1 + _, err = client.Get().AbsPath("/apis/config.openshift.io/v1/clusterversions").Do(context.TODO()).Raw() + if err == nil { + klog.V(5).Info("Found openshift version from /apis/config.openshift.io/v1/clusterversions") + return true + } + + klog.V(5).Infof("fail to GET openshift version, assuming not OpenShift: %s", err.Error()) + return false +} + +// IntegrityChecking checks to ensure all required conditions are met when completing the specs +func IntegrityChecking(opt TestOptions) error { + return CheckMCOComponents(opt) +} + +// GetPullSecret checks the secret from MCH CR and return the secret name +func GetPullSecret(opt TestOptions) (string, error) { + clientDynamic := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + 
opt.HubCluster.KubeContext) + + mchList, err := clientDynamic.Resource(NewOCMMultiClusterHubGVR()).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return "", err + } + + if len(mchList.Items) == 0 { + return "", fmt.Errorf("can not find the MCH operator CR in the cluster") + } + + mchName := mchList.Items[0].GetName() + mchNs := mchList.Items[0].GetNamespace() + + getMCH, err := clientDynamic.Resource(NewOCMMultiClusterHubGVR()).Namespace(mchNs).Get(context.TODO(), mchName, metav1.GetOptions{}) + if err != nil { + return "", err + } + + spec := getMCH.Object["spec"].(map[string]interface{}) + if _, ok := spec["imagePullSecret"]; !ok { + return "", fmt.Errorf("can not find imagePullSecret in MCH CR") + } + + ips := spec["imagePullSecret"].(string) + return ips, nil +} diff --git a/tests/resources/.gitignore b/tests/resources/.gitignore new file mode 100644 index 000000000..1f22f3d38 --- /dev/null +++ b/tests/resources/.gitignore @@ -0,0 +1,2 @@ +options.yaml +env.list \ No newline at end of file diff --git a/tests/resources/env.list.template b/tests/resources/env.list.template new file mode 100644 index 000000000..1b87f2b81 --- /dev/null +++ b/tests/resources/env.list.template @@ -0,0 +1,4 @@ +BUCKET=YOUR_S3_BUCKET +REGION=YOUR_S3_REGION +AWS_ACCESS_KEY_ID=YOUR_S3_AWS_ACCESS_KEY_ID +AWS_SECRET_ACCESS_KEY=YOUR_S3_AWS_SECRET_ACCESS_KEY \ No newline at end of file diff --git a/tests/resources/options.yaml.template b/tests/resources/options.yaml.template new file mode 100644 index 000000000..e354d5960 --- /dev/null +++ b/tests/resources/options.yaml.template @@ -0,0 +1,4 @@ +options: + hub: + name: HUB_CLUSTER_NAME + baseDomain: BASE_DOMAIN diff --git a/tests/run-in-kind/env.sh b/tests/run-in-kind/env.sh new file mode 100755 index 000000000..f1f641af1 --- /dev/null +++ b/tests/run-in-kind/env.sh @@ -0,0 +1 @@ +#!/usr/bin/env bash diff --git a/tests/run-in-kind/grafana/grafana-config-test.yaml b/tests/run-in-kind/grafana/grafana-config-test.yaml new file mode 100644 index 000000000..18c2da341 --- /dev/null +++ b/tests/run-in-kind/grafana/grafana-config-test.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +stringData: + grafana.ini: | + [auth] + disable_login_form = false + disable_signout_menu = false + + [auth.anonymous] + enabled = true + org_role = Admin + + [paths] + data = /var/lib/grafana + logs = /var/lib/grafana/logs + plugins = /var/lib/grafana/plugins + provisioning = /etc/grafana/provisioning + + [security] + admin_user = admin + admin_password = secret + + [server] + http_port = 3001 + + [users] + viewers_can_edit = true +kind: Secret +metadata: + name: grafana-config-test + namespace: open-cluster-management-observability +type: Opaque diff --git a/tests/run-in-kind/grafana/grafana-datasources-test.yaml b/tests/run-in-kind/grafana/grafana-datasources-test.yaml new file mode 100644 index 000000000..eb2449982 --- /dev/null +++ b/tests/run-in-kind/grafana/grafana-datasources-test.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +stringData: + datasources.yaml: | + apiVersion: 1 + datasources: + - access: proxy + basicAuth: false + basicAuthPassword: "" + basicAuthUser: "" + editable: false + isDefault: true + name: Observatorium + orgId: 0 + type: prometheus + url: http://observability-thanos-query-frontend.open-cluster-management-observability.svc.cluster.local:9090 + version: 0 + jsonData: null + secureJsonData: null +kind: Secret +metadata: + name: grafana-datasources-test + namespace: open-cluster-management-observability +type: Opaque diff --git 
a/tests/run-in-kind/grafana/grafana-svc.yaml b/tests/run-in-kind/grafana/grafana-svc.yaml new file mode 100644 index 000000000..c5dba0936 --- /dev/null +++ b/tests/run-in-kind/grafana/grafana-svc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: multicluster-observability-grafana-test + name: grafana-test + namespace: open-cluster-management-observability +spec: + externalTrafficPolicy: Cluster + ports: + - name: grafana-http + nodePort: 31001 + port: 3001 + protocol: TCP + targetPort: 3001 + selector: + app: multicluster-observability-grafana-test + type: NodePort diff --git a/tests/run-in-kind/kind/kind-hub.config.yaml b/tests/run-in-kind/kind/kind-hub.config.yaml new file mode 100644 index 000000000..b1bf78a1c --- /dev/null +++ b/tests/run-in-kind/kind/kind-hub.config.yaml @@ -0,0 +1,17 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + extraPortMappings: + - containerPort: 80 + hostPort: 80 + listenAddress: "0.0.0.0" + - containerPort: 443 + hostPort: 443 + listenAddress: "0.0.0.0" + - containerPort: 6443 + hostPort: 32806 + listenAddress: "0.0.0.0" + - containerPort: 31001 + hostPort: 31001 + listenAddress: "127.0.0.1" diff --git a/tests/run-in-kind/req_crds/clusteroperators-crd.yaml b/tests/run-in-kind/req_crds/clusteroperators-crd.yaml new file mode 100644 index 000000000..d83e27c47 --- /dev/null +++ b/tests/run-in-kind/req_crds/clusteroperators-crd.yaml @@ -0,0 +1,161 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusteroperators.config.openshift.io +spec: + conversion: + strategy: None + group: config.openshift.io + names: + kind: ClusterOperator + listKind: ClusterOperatorList + plural: clusteroperators + shortNames: + - co + singular: clusteroperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterOperator is the Custom Resource object which holds the + current state of an operator. This object is used by operators to convey + their state to the rest of the cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds configuration that could apply to any operator. + type: object + status: + description: status holds the information about the state of an operator. It + is consistent with status information across the Kubernetes ecosystem. + properties: + conditions: + description: conditions describes the state of the operator's managed + and monitored components. + items: + description: ClusterOperatorStatusCondition represents the state + of the operator's managed and monitored components. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status property. + format: date-time + type: string + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. + type: string + reason: + description: reason is the CamelCase reason for the condition's + current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + extension: + description: extension contains any additional status information + specific to the operator which owns this status object. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" + or related to this operator. Common uses are: 1. the detailed resource + driving the operator 2. operator namespaces 3. operand namespaces' + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + type: array + versions: + description: versions is a slice of operator and operand version tuples. Operators + which manage multiple operands will have multiple operand entries + in the array. Available operators must report the version of the + operator itself with the name "operator". An operator reports a + new "operator" version when it has rolled out the new version to + all of its operands. + items: + properties: + name: + description: name is the name of the particular operand this + version is for. It usually matches container images, not + operators. + type: string + version: + description: version indicates which version of a particular + operand is currently being managed. It must always match + the Available operand. 
If 1.0.0 is Available, then this must + indicate 1.0.0 even if the operator is trying to rollout 1.1.0 + type: string + required: + - name + - version + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/tests/run-in-kind/req_crds/ingresses-crd.yaml b/tests/run-in-kind/req_crds/ingresses-crd.yaml new file mode 100644 index 000000000..b570534a5 --- /dev/null +++ b/tests/run-in-kind/req_crds/ingresses-crd.yaml @@ -0,0 +1,58 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + name: ingresses.config.openshift.io +spec: + conversion: + strategy: None + group: config.openshift.io + names: + kind: Ingress + listKind: IngressList + plural: ingresses + singular: ingress + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Ingress holds cluster-wide information about ingress, including + the default ingress domain used for routes. The canonical name is `cluster`. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + domain: + description: "domain is used to generate a default host name for a + route when the route's host name is empty. The generated host name + will follow this pattern: \"..\". + \n It is also used as the default wildcard domain suffix for ingress. + The default ingresscontroller domain will follow this pattern: \"*.\". + \n Once set, changing domain is not currently supported." + type: string + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/tests/run-in-kind/req_crds/servicecas-crd.yaml b/tests/run-in-kind/req_crds/servicecas-crd.yaml new file mode 100644 index 000000000..c35910b44 --- /dev/null +++ b/tests/run-in-kind/req_crds/servicecas-crd.yaml @@ -0,0 +1,162 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + name: servicecas.operator.openshift.io +spec: + conversion: + strategy: None + group: operator.openshift.io + names: + kind: ServiceCA + listKind: ServiceCAList + plural: servicecas + singular: serviceca + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceCA provides information to configure an operator to manage + the service cert controllers + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/tests/run-in-kind/router/route_crd.yaml b/tests/run-in-kind/router/route_crd.yaml new file mode 100644 index 000000000..ce8c9221e --- /dev/null +++ b/tests/run-in-kind/router/route_crd.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + # name must match the spec fields below, and be in the form: . + name: routes.route.openshift.io +spec: + # group name to use for REST API: /apis// + group: route.openshift.io + # list of versions supported by this CustomResourceDefinition + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + # either Namespaced or Cluster + scope: Namespaced + subresources: + # enable spec/status + status: {} + names: + # plural name to be used in the URL: /apis/// + plural: routes + # singular name to be used as an alias on the CLI and for display + singular: route + # kind is normally the CamelCased singular type. Your resource manifests use this. 
+ kind: Route + additionalPrinterColumns: + - name: Host + type: string + JSONPath: .status.ingress[0].host + - name: Admitted + type: string + JSONPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status + - name: Service + type: string + JSONPath: .spec.to.name + - name: TLS + type: string + JSONPath: .spec.tls.type diff --git a/tests/run-in-kind/router/router.yaml b/tests/run-in-kind/router/router.yaml new file mode 100644 index 000000000..e51ee55cb --- /dev/null +++ b/tests/run-in-kind/router/router.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ingress-router + namespace: openshift-ingress + labels: + k8s-app: ingress-router +spec: + selector: + matchLabels: + k8s-app: ingress-router + template: + metadata: + labels: + k8s-app: ingress-router + spec: + serviceAccountName: ingress-router + containers: + - env: + - name: ROUTER_LISTEN_ADDR + value: 0.0.0.0:1936 + - name: ROUTER_METRICS_TYPE + value: haproxy + - name: ROUTER_SERVICE_HTTPS_PORT + value: "443" + - name: ROUTER_SERVICE_HTTP_PORT + value: "80" + - name: ROUTER_THREADS + value: "4" + image: quay.io/openshift/origin-haproxy-router:v4.0.0 + livenessProbe: + httpGet: + host: localhost + path: /healthz + port: 1936 + initialDelaySeconds: 10 + name: router + ports: + - containerPort: 80 + - containerPort: 443 + - containerPort: 1936 + name: stats + protocol: TCP + readinessProbe: + httpGet: + host: localhost + path: healthz/ready + port: 1936 + initialDelaySeconds: 10 + resources: + requests: + cpu: 100m + memory: 256Mi + hostNetwork: true diff --git a/tests/run-in-kind/router/router_rbac.yaml b/tests/run-in-kind/router/router_rbac.yaml new file mode 100644 index 000000000..c85a3fb07 --- /dev/null +++ b/tests/run-in-kind/router/router_rbac.yaml @@ -0,0 +1,68 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: openshift-ingress-router +rules: +- apiGroups: + - "" + resources: + - namespaces + - services + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - route.openshift.io + resources: + - routes + verbs: + - list + - watch +- apiGroups: + - route.openshift.io + resources: + - routes/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openshift-ingress-router +roleRef: + apiGroup: "" + kind: ClusterRole + name: openshift-ingress-router +subjects: +- kind: ServiceAccount + namespace: openshift-ingress + name: ingress-router +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: openshift-ingress-router-auth-delegator +roleRef: + apiGroup: "" + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + namespace: openshift-ingress + name: ingress-router + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-ingress +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-router + namespace: openshift-ingress diff --git a/tests/run-in-kind/run-e2e-in-kind.sh b/tests/run-in-kind/run-e2e-in-kind.sh new file mode 100755 index 000000000..da7565a9f --- /dev/null +++ b/tests/run-in-kind/run-e2e-in-kind.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash + +set -exo pipefail + +ROOTDIR="$(cd "$(dirname "$0")/../.." ; pwd -P)" +WORKDIR=${ROOTDIR}/tests/run-in-kind + +export IS_KIND_ENV=true + +# shellcheck disable=SC1091 +source ${WORKDIR}/env.sh + +setup_kubectl_command() { + if ! 
command -v kubectl >/dev/null 2>&1; then + echo "This script will install kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your machine" + if [[ "$(uname)" == "Linux" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl + elif [[ "$(uname)" == "Darwin" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/darwin/amd64/kubectl + fi + chmod +x ./kubectl + sudo mv ./kubectl /usr/local/bin/kubectl + fi +} + +create_kind_cluster() { + if ! command -v kind >/dev/null 2>&1; then + echo "This script will install kind (https://kind.sigs.k8s.io/) on your machine." + curl -Lo ./kind-amd64 "https://kind.sigs.k8s.io/dl/v0.10.0/kind-$(uname)-amd64" + chmod +x ./kind-amd64 + sudo mv ./kind-amd64 /usr/local/bin/kind + fi + echo "Delete the KinD cluster if exists" + kind delete cluster --name $1 || true + rm -rf $HOME/.kube/kind-config-$1 + + echo "Start KinD cluster with the default cluster name - $1" + kind create cluster --kubeconfig $HOME/.kube/kind-config-$1 --name $1 --config ${WORKDIR}/kind/kind-$1.config.yaml + export KUBECONFIG=$HOME/.kube/kind-config-$1 +} + +deploy_service_ca_operator() { + kubectl create ns openshift-config-managed + kubectl apply -f ${WORKDIR}/service-ca/ +} + +deploy_crds() { + kubectl apply -f ${WORKDIR}/req_crds/ +} + +deploy_templates() { + kubectl apply -f ${WORKDIR}/templates/ +} + +deploy_openshift_router() { + kubectl create ns openshift-ingress + kubectl apply -f ${WORKDIR}/router/ +} + +setup_e2e_test_env() { + ${ROOTDIR}/cicd-scripts/setup-e2e-tests.sh +} + +run_e2e_test() { + ${ROOTDIR}/cicd-scripts/run-e2e-tests.sh +} + +run() { + setup_kubectl_command + create_kind_cluster hub + deploy_crds + deploy_templates + deploy_service_ca_operator + deploy_openshift_router + setup_e2e_test_env + run_e2e_test +} + +run diff --git a/tests/run-in-kind/service-ca/00_roles.yaml b/tests/run-in-kind/service-ca/00_roles.yaml new file mode 100644 index 000000000..586b54cc5 --- /dev/null +++ b/tests/run-in-kind/service-ca/00_roles.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:openshift:operator:service-ca-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: service-ca-operator + namespace: openshift-service-ca-operator \ No newline at end of file diff --git a/tests/run-in-kind/service-ca/01_namespace.yaml b/tests/run-in-kind/service-ca/01_namespace.yaml new file mode 100644 index 000000000..0e0993b75 --- /dev/null +++ b/tests/run-in-kind/service-ca/01_namespace.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + openshift.io/run-level: "1" + openshift.io/cluster-monitoring: "true" + name: openshift-service-ca-operator + annotations: + openshift.io/node-selector: "" diff --git a/tests/run-in-kind/service-ca/02_service.yaml b/tests/run-in-kind/service-ca/02_service.yaml new file mode 100644 index 000000000..f62abfed8 --- /dev/null +++ b/tests/run-in-kind/service-ca/02_service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.openshift.io/serving-cert-secret-name: serving-cert + labels: + app: service-ca-operator + name: metrics + namespace: openshift-service-ca-operator +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: 8443 + selector: + app: service-ca-operator + sessionAffinity: None + type: ClusterIP diff --git 
a/tests/run-in-kind/service-ca/03_cm.yaml b/tests/run-in-kind/service-ca/03_cm.yaml new file mode 100644 index 000000000..60d765600 --- /dev/null +++ b/tests/run-in-kind/service-ca/03_cm.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: openshift-service-ca-operator + name: service-ca-operator-config +data: + operator-config.yaml: | + apiVersion: operator.openshift.io/v1alpha1 + kind: GenericOperatorConfig diff --git a/tests/run-in-kind/service-ca/03_operator.cr.yaml b/tests/run-in-kind/service-ca/03_operator.cr.yaml new file mode 100644 index 000000000..4413f8bde --- /dev/null +++ b/tests/run-in-kind/service-ca/03_operator.cr.yaml @@ -0,0 +1,8 @@ +apiVersion: operator.openshift.io/v1 +kind: ServiceCA +metadata: + name: cluster + annotations: + release.openshift.io/create-only: "true" +spec: + managementState: Managed diff --git a/tests/run-in-kind/service-ca/04_sa.yaml b/tests/run-in-kind/service-ca/04_sa.yaml new file mode 100644 index 000000000..61ca3fb40 --- /dev/null +++ b/tests/run-in-kind/service-ca/04_sa.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: openshift-service-ca-operator + name: service-ca-operator + labels: + app: service-ca-operator diff --git a/tests/run-in-kind/service-ca/05_deploy.yaml b/tests/run-in-kind/service-ca/05_deploy.yaml new file mode 100644 index 000000000..ef1720b0a --- /dev/null +++ b/tests/run-in-kind/service-ca/05_deploy.yaml @@ -0,0 +1,66 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: openshift-service-ca-operator + name: service-ca-operator + labels: + app: service-ca-operator +spec: + replicas: 1 + selector: + matchLabels: + app: service-ca-operator + template: + metadata: + name: service-ca-operator + labels: + app: service-ca-operator + spec: + serviceAccountName: service-ca-operator + containers: + - name: service-ca-operator + image: quay.io/openshift/origin-service-ca-operator:4.6 + imagePullPolicy: IfNotPresent + command: ["service-ca-operator", "operator"] + args: + - "--config=/var/run/configmaps/config/operator-config.yaml" + - "-v=4" + resources: + requests: + memory: 80Mi + cpu: 10m + env: + - name: CONTROLLER_IMAGE + value: quay.io/openshift/origin-service-ca-operator:4.6 + - name: OPERATOR_IMAGE_VERSION + value: "0.0.1-snapshot" + volumeMounts: + - mountPath: /var/run/configmaps/config + name: config + - mountPath: /var/run/secrets/serving-cert + name: serving-cert + volumes: + - name: serving-cert + secret: + defaultMode: 400 + secretName: serving-cert + optional: true + - name: config + configMap: + defaultMode: 440 + name: service-ca-operator-config + nodeSelector: + node-role.kubernetes.io/master: "" + priorityClassName: "system-cluster-critical" + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: "NoSchedule" + - key: "node.kubernetes.io/unreachable" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 120 + - key: "node.kubernetes.io/not-ready" + operator: "Exists" + effect: "NoExecute" + tolerationSeconds: 120 diff --git a/tests/run-in-kind/service-ca/07_clusteroperator.yaml b/tests/run-in-kind/service-ca/07_clusteroperator.yaml new file mode 100644 index 000000000..0f814dc3c --- /dev/null +++ b/tests/run-in-kind/service-ca/07_clusteroperator.yaml @@ -0,0 +1,9 @@ +apiVersion: config.openshift.io/v1 +kind: ClusterOperator +metadata: + name: service-ca +spec: {} +status: + versions: + - name: operator + version: "0.0.1-snapshot" diff --git a/tests/run-in-kind/templates/cluster-monitoring-view.yaml 
b/tests/run-in-kind/templates/cluster-monitoring-view.yaml new file mode 100644 index 000000000..a02a8e944 --- /dev/null +++ b/tests/run-in-kind/templates/cluster-monitoring-view.yaml @@ -0,0 +1,11 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: cluster-monitoring-view
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 000000000..e2633dfd7 --- /dev/null +++ b/tools/README.md @@ -0,0 +1,88 @@
+# How to design a grafana dashboard
+
+## Prerequisites
+
+You must enable the observability service by creating a MultiClusterObservability CustomResource (CR) instance.
+
+## Setup grafana develop instance
+
+First, use the `setup-grafana-dev.sh` script to set up your Grafana development instance.
+
+```
+$ ./setup-grafana-dev.sh --deploy
+secret/grafana-dev-config created
+deployment.apps/grafana-dev created
+service/grafana-dev created
+ingress.extensions/grafana-dev created
+```
+
+## Switch user to be grafana admin
+
+Second, ask the user to log in to `https://$ACM_URL/grafana-dev/` before you use the `switch-to-grafana-admin.sh` script to make that user a Grafana admin.
+
+```
+$ ./switch-to-grafana-admin.sh kube:admin
+User switched to be grafana admin
+```
+
+## Design your grafana dashboard
+
+Now refresh the Grafana console and follow these steps to design your dashboard:
+
+1. Click the **+** icon on the left panel, select **Create Dashboard**, and then click **Add new panel**.
+2. In the New Dashboard/Edit Panel view, go to the **Query** tab.
+3. Configure your query by selecting `Observatorium` from the data source selector and entering a PromQL query.
+4. Click the **Save** icon in the top right corner of your screen to save the dashboard.
+5. Add a descriptive name, and then click **Save**.
+
+You can use the `generate-dashboard-configmap-yaml.sh` script to generate a dashboard ConfigMap and save it locally.
+
+```
+./generate-dashboard-configmap-yaml.sh "Your Dashboard Name"
+Save dashboard to ./your-dashboard-name.yaml
+```
+
+If you do not have permission to run the `generate-dashboard-configmap-yaml.sh` script, you can follow these steps to create a dashboard ConfigMap:
+
+1. Go to a dashboard and click the **Dashboard settings** icon.
+2. Click the **JSON Model** icon on the left panel.
+3. Copy the dashboard JSON data and put it into the `$your_dashboard_json` field.
+4. Replace the `$your-dashboard-name` fields.
+
+```yaml
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: $your-dashboard-name
+  namespace: open-cluster-management-observability
+  labels:
+    grafana-custom-dashboard: "true"
+data:
+  $your-dashboard-name.json: |
+    $your_dashboard_json
+```
+
+Note: if your dashboard is not in the `General` folder, you can specify the folder name in the `annotations` of this ConfigMap:
+```
+annotations:
+  observability.open-cluster-management.io/dashboard-folder: Custom
+```
+
+5. Update the metrics allowlist
+
+When you create a new dashboard like [example/custom-dashboard.yaml](example/custom-dashboard.yaml), it may show no data at first. This is because it depends on new metrics that are not uploaded to the hub by default. You also need to update the custom metrics allowlist so that the new metrics can be uploaded to the server and shown in the dashboard. In this example, run the following command to update the metrics allowlist.
+```yaml +oc apply -f observability-metrics-custom-allowlist.yaml +``` + +## Uninstall grafana develop instance + +You can use the following command to uninstall your grafana instance. + +``` +$ ./setup-grafana-dev.sh --clean +secret "grafana-dev-config" deleted +deployment.apps "grafana-dev" deleted +service "grafana-dev" deleted +ingress.extensions "grafana-dev" deleted +``` diff --git a/tools/example/custom-dashboard.yaml b/tools/example/custom-dashboard.yaml new file mode 100644 index 000000000..b6a8e2ba6 --- /dev/null +++ b/tools/example/custom-dashboard.yaml @@ -0,0 +1,12 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: custom-dashboard + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: Custom + labels: + grafana-custom-dashboard: "true" +data: + custom-dashboard.json: | + {"style": "dark", "templating": {"list": [{"regex": "", "skipUrlSync": false, "hide": 0, "description": null, "multi": false, "refresh": 1, "label": null, "current": {"text": "Observatorium", "selected": false, "value": "Observatorium"}, "error": null, "query": "prometheus", "includeAll": true, "type": "datasource", "options": [], "name": "datasource"}, {"query": {"query": "label_values(node_cpu_seconds_total, cluster)", "refId": "StandardVariableQuery"}, "regex": "", "skipUrlSync": false, "hide": 2, "allValue": null, "label": null, "current": {"text": "local-cluster", "selected": false, "value": "local-cluster"}, "useTags": false, "type": "query", "sort": 1, "description": null, "tags": [], "definition": "label_values(node_cpu_seconds_total, cluster)", "multi": false, "name": "cluster", "refresh": 2, "tagValuesQuery": "", "datasource": null, "error": null, "tagsQuery": "", "options": [], "includeAll": false}]}, "links": [], "tags": [], "graphTooltip": 0, "hideControls": false, "title": "Custom Dashboard", "editable": true, "id": 24, "gnetId": null, "timepicker": {}, "version": 1, "time": {"to": "now", "from": "now-6h"}, "timezone": "utc", "schemaVersion": 27, "panels": [{"bars": false, "timeFrom": null, "hiddenSeries": false, "thresholds": [], "spaceLength": 10, "nullPointMode": "null as zero", "pluginVersion": "7.4.2", "renderer": "flot", "gridPos": {"y": 0, "h": 9, "w": 12, "x": 0}, "linewidth": 1, "steppedLine": false, "targets": [{"expr": "avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__interval])) by (namespace)", "interval": "", "refId": "A", "legendFormat": "{{namespace}}"}], "fill": 1, "title": "Average Container Bandwidth by Namespace: Received", "tooltip": {"sort": 0, "shared": true, "value_type": "individual"}, "id": 2, "points": false, "xaxis": {"buckets": null, "values": [], "mode": "time", "name": null, "show": true}, "seriesOverrides": [], "percentage": false, "type": "graph", "dashes": false, "interval": "1m", "dashLength": 10, "stack": true, "fieldConfig": {"overrides": [], "defaults": {"custom": {}}}, "yaxis": {"align": false, "alignLevel": null}, "timeShift": null, "aliasColors": {}, "lines": true, "legend": {"avg": false, "min": false, "max": false, "show": true, "current": false, "values": false, "total": false}, "yaxes": [{"logBase": 1, "format": "short", "max": null, "min": null, "label": null, "show": true}, {"logBase": 1, "format": "short", "max": null, "min": null, "label": null, "show": true}], "datasource": null, "pointradius": 2, "timeRegions": [], "options": {"alertThreshold": true}, "fillGradient": 0}], "annotations": {"list": [{"enable": true, "hide": true, 
"name": "Annotations & Alerts", "builtIn": 1, "datasource": "-- Grafana --", "type": "dashboard", "iconColor": "rgba(0, 211, 255, 1)"}]}} diff --git a/tools/example/observability-metrics-custom-allowlist.yaml b/tools/example/observability-metrics-custom-allowlist.yaml new file mode 100644 index 000000000..14cc1b9ce --- /dev/null +++ b/tools/example/observability-metrics-custom-allowlist.yaml @@ -0,0 +1,9 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: observability-metrics-custom-allowlist + namespace: open-cluster-management-observability +data: + metrics_list.yaml: | + names: + - container_network_receive_bytes_total diff --git a/tools/generate-dashboard-configmap-yaml.sh b/tools/generate-dashboard-configmap-yaml.sh new file mode 100755 index 000000000..972b0931b --- /dev/null +++ b/tools/generate-dashboard-configmap-yaml.sh @@ -0,0 +1,139 @@ +#!/usr/bin/env bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +obs_namespace='open-cluster-management-observability' + +if command -v python &> /dev/null +then + PYTHON_CMD="python" +elif command -v python2 &> /dev/null +then + PYTHON_CMD="python2" +elif command -v python3 &> /dev/null +then + PYTHON_CMD="python3" +else + echo "Failed to found python command, please install firstly" + exit 1 +fi + +usage() { + cat <" + exit 1 + fi + fi + + podName=`kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'` + if [ $? -ne 0 ] || [ -z "$podName" ]; then + echo "Failed to get grafana pod name, please check your grafana-dev deployment" + exit 1 + fi + + curlCMD="kubectl exec -it -n "$obs_namespace" $podName -c grafana-dashboard-loader -- /usr/bin/curl" + XForwardedUser="WHAT_YOU_ARE_DOING_IS_VOIDING_SUPPORT_0000000000000000000000000000000000000000000000000000000000000000" + dashboards=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User: $XForwardedUser" 127.0.0.1:3001/api/search` + if [ $? -ne 0 ]; then + echo "Failed to search dashboards, please check your grafana-dev instance" + exit 1 + fi + + dashboard=`echo $dashboards | $PYTHON_CMD -c "import sys, json;[sys.stdout.write(json.dumps(dash)) for dash in json.load(sys.stdin) if dash['title'] == '$org_dashboard_name']"` + + dashboardUID=`echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['uid'])" 2>/dev/null` + dashboardFolderId=`echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['folderId'])" 2>/dev/null` + dashboardFolderTitle=`echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['folderTitle'])" 2>/dev/null` + + dashboardJson=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/dashboards/uid/$dashboardUID | $PYTHON_CMD -c "import sys, json; print(json.dumps(json.load(sys.stdin)['dashboard']))" 2>/dev/null` + if [ $? 
-ne 0 ]; then + echo "Failed to fetch dashboard json data, please check your dashboard name <$org_dashboard_name>" + exit 1 + fi + + # delete dashboard uid avoid conflict with old dashboard + dashboardJson=`echo $dashboardJson | $PYTHON_CMD -c "import sys, json; d=json.load(sys.stdin);del d['uid'];print(json.dumps(d))"` + + if [ $dashboardFolderId -ne 0 ]; then + cat > $savePath/$dashboard_name.yaml < $savePath/$dashboard_name.yaml < to $savePath/$dashboard_name.yaml" +} + +start "$@" diff --git a/tools/setup-grafana-dev.sh b/tools/setup-grafana-dev.sh new file mode 100755 index 000000000..70ad3c9f1 --- /dev/null +++ b/tools/setup-grafana-dev.sh @@ -0,0 +1,180 @@ +#!/usr/bin/env bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +obs_namespace='open-cluster-management-observability' +deploy_flag=0 + +sed_command='sed -i-e -e' +if [[ "$(uname)" == "Darwin" ]]; then + sed_command='sed -i '-e' -e' +fi + +usage() { + cat < grafana-dev-config.ini + if [ $? -ne 0 ]; then + echo "Failed to get grafana config secret" + exit 1 + fi + $sed_command "s~%(domain)s/grafana/$~%(domain)s/grafana-dev/~g" grafana-dev-config.ini + kubectl create secret generic grafana-dev-config -n "$obs_namespace" --from-file=grafana.ini=grafana-dev-config.ini + + kubectl get deployment -n "$obs_namespace" -l app=multicluster-observability-grafana -o yaml > grafana-dev-deploy.yaml + if [ $? -ne 0 ]; then + echo "Failed to get grafana deployment" + exit 1 + fi + $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-deploy.yaml + $sed_command "s~name: observability-grafana$~name: grafana-dev~g" grafana-dev-deploy.yaml + $sed_command "s~replicas:.*$~replicas: 1~g" grafana-dev-deploy.yaml + $sed_command "s~grafana-config$~grafana-dev-config~g" grafana-dev-deploy.yaml + $sed_command "s~app: multicluster-observability-grafana$~app: multicluster-observability-grafana-dev~g" grafana-dev-deploy.yaml + $sed_command "s~grafana-config$~grafana-dev-config~g" grafana-dev-deploy.yaml + $sed_command "s~- multicluster-observability-grafana$~- multicluster-observability-grafana-dev~g" grafana-dev-deploy.yaml + + POD_NAME=$(kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana |grep grafana|awk '{split($0, a, " "); print a[1]}' |head -n 1) + if [ -z "$POD_NAME" ]; then + echo "Failed to get grafana pod name" + exit 1 + fi + + GROUP_ID=$(kubectl get pods "$POD_NAME" -n "$obs_namespace" -o jsonpath='{.spec.securityContext.fsGroup}') + if [[ ${GROUP_ID} == "grafana" ]]; then + GROUP_ID=472 + fi + $sed_command "s~ securityContext:.*$~ securityContext: {fsGroup: ${GROUP_ID}}~g" grafana-dev-deploy.yaml + sed "s~- emptyDir: {}$~- persistentVolumeClaim:$ claimName: grafana-dev~g" grafana-dev-deploy.yaml > grafana-dev-deploy.yaml.bak + tr $ '\n' < grafana-dev-deploy.yaml.bak > grafana-dev-deploy.yaml + kubectl apply -f grafana-dev-deploy.yaml + + kubectl get svc -n "$obs_namespace" -l app=multicluster-observability-grafana -o yaml > grafana-dev-svc.yaml + if [ $? 
-ne 0 ]; then + echo "Failed to get grafana service" + exit 1 + fi + $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-svc.yaml + $sed_command "s~app: multicluster-observability-grafana$~app: multicluster-observability-grafana-dev~g" grafana-dev-svc.yaml + $sed_command "s~clusterIP:.*$~ ~g" grafana-dev-svc.yaml + # For OCP 4.7, we should remove clusterIPs filed and IPs + $sed_command "s~clusterIPs:.*$~ ~g" grafana-dev-svc.yaml + $sed_command 's/\- [0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}//g' grafana-dev-svc.yaml + kubectl apply -f grafana-dev-svc.yaml + + kubectl get ingress -n "$obs_namespace" grafana -o yaml > grafana-dev-ingress.yaml + if [ $? -ne 0 ]; then + echo "Failed to get grafana ingress" + exit 1 + fi + $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-ingress.yaml + $sed_command "s~serviceName: grafana$~serviceName: grafana-dev~g" grafana-dev-ingress.yaml + $sed_command "s~path: /grafana$~path: /grafana-dev~g" grafana-dev-ingress.yaml + kubectl apply -f grafana-dev-ingress.yaml + + cat >grafana-pvc.yaml <&2 -e "${1-}" +} + +die() { + local msg=$1 + local code=${2-1} + msg "$msg" + exit "$code" +} + +start() { + if [ $# -eq 0 -o $# -gt 3 ]; then + usage + fi + + while [[ $# -gt 0 ]] + do + key="$1" + case $key in + -h|--help) + usage + ;; + + -n|--namespace) + obs_namespace="$2" + shift + shift + ;; + + -c|--clean) + clean + exit 0 + ;; + + -d|--deploy) + deploy_flag=1 + shift + ;; + + *) + usage + ;; + esac + done + + if [ $deploy_flag -eq 1 ]; then + deploy + exit + fi +} + +start "$@" diff --git a/tools/simulator/alert-forward/README.md b/tools/simulator/alert-forward/README.md new file mode 100644 index 000000000..2e8fd8ab2 --- /dev/null +++ b/tools/simulator/alert-forward/README.md @@ -0,0 +1,61 @@ +# Alert Forward Simulator + +The alert forward simulator can be used to simulate multiple Prometheus instances to forward alerts to the Alertmanager in the ACM hub cluster. + +## Prereqs + +You must meet the following requirements to setup metrics collector: + +1. ACM 2.3+ available +2. `MultiClusterObservability` instance available in the hub cluster + +## How to use + +1. Export host of the Alertmanager in the ACM hub cluster. + +``` +export ALERTMANAGER_HOST=$(oc -n open-cluster-management-observability get route alertmanager -o jsonpath="{.spec.host}") +``` + +2. Export access token to the Alertmanager in the ACM hub cluster. + +``` +export ALERRTMANAGER_ACCESS_TOKEN=$(oc -n open-cluster-management-observability get secret $(oc -n open-cluster-management-observability get sa observability-alertmanager-accessor -o yaml | grep observability-alertmanager-accessor-token | cut -d' ' -f3) -o jsonpath="{.data.token}" | base64 -d) +``` + +3. (Optional)Export simulated max go routine number for sending alert, if not set, default value(20) will be used. + +``` +export MAX_ALERT_SEND_ROUTINE=5 +``` + +4. (Optional) Export alert send interval, if not set, default value(5 seconds) will be used. + +``` +export ALERT_SEND_INTERVAL=10s +``` + +5. Run the simulator to send fake alerts to the Alertmanager in the ACM hub cluster. 
+ +``` +# go run ./tools/simulator/alert-forward/main.go +2021/10/12 04:22:50 sending alerts with go routine 0 +2021/10/12 04:22:50 conn was reused: false +2021/10/12 04:22:50 send routine 0 done +2021/10/12 04:22:55 sending alerts with go routine 1 +2021/10/12 04:22:55 conn was reused: true +2021/10/12 04:22:55 send routine 1 done +2021/10/12 04:23:00 sending alerts with go routine 2 +2021/10/12 04:23:00 conn was reused: true +2021/10/12 04:23:00 send routine 2 done +2021/10/12 04:23:05 sending alerts with go routine 3 +2021/10/12 04:23:05 conn was reused: true +2021/10/12 04:23:05 send routine 3 done +2021/10/12 04:23:10 sending alerts with go routine 4 +2021/10/12 04:23:10 conn was reused: true +2021/10/12 04:23:10 send routine 4 done +2021/10/12 04:23:15 sending alerts with go routine 5 +2021/10/12 04:23:15 conn was reused: true +2021/10/12 04:23:15 send routine 5 done +``` + diff --git a/tools/simulator/alert-forward/main.go b/tools/simulator/alert-forward/main.go new file mode 100644 index 000000000..873dc33b8 --- /dev/null +++ b/tools/simulator/alert-forward/main.go @@ -0,0 +1,168 @@ +package main + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "log" + "net/url" + "net/http" + "net/http/httptrace" + "os" + "strconv" + "sync" + "time" + + "github.com/pkg/errors" + config_util "github.com/prometheus/common/config" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" +) + +var alerts = `[ + { + "annotations":{ + "description":"just for testing\n", + "summary":"An alert that is for testing." + }, + "receivers":[ + { + "name":"test" + } + ], + "labels":{ + "alertname":"test", + "cluster":"testCluster", + "severity":"none" + } + } +]` + +func main() { + amHost := os.Getenv("ALERTMANAGER_HOST") + if amHost == "" { + log.Println("ALERTMANAGER_HOST must be specified!") + os.Exit(1) + } + amUrl := (&url.URL{ + Scheme: "https", + Host: amHost, + Path: "/api/v2/alerts", + }).String() + + amAccessToken := os.Getenv("ALERRTMANAGER_ACCESS_TOKEN") + if amAccessToken == "" { + log.Println("ALERRTMANAGER_ACCESS_TOKEN must be specified!") + os.Exit(1) + } + maxAlertSendRoutine := os.Getenv("MAX_ALERT_SEND_ROUTINE") + maxAlertSendRoutineNumber := 20 + if maxAlertSendRoutine == "" { + log.Println("MAX_ALERT_SEND_ROUTINE is not specified, fallback to default value: 20") + } else { + i, err := strconv.Atoi(maxAlertSendRoutine) + if err != nil { + log.Println("invalid MAX_ALERT_SEND_ROUTINE, must be number!") + os.Exit(1) + } + maxAlertSendRoutineNumber = i + } + + alertSendInterval := os.Getenv("ALERT_SEND_INTERVAL") + asInterval, err := time.ParseDuration(alertSendInterval) + if err != nil { + log.Println("invalid ALERT_SEND_INTERVAL, fallback to default value: 5s") + asInterval = 5*time.Second + } + + amCfg := createAlertmanagerConfig(amHost, amAccessToken) + + // client trace to log whether the request's underlying tcp connection was re-used + clientTrace := &httptrace.ClientTrace{ + GotConn: func(info httptrace.GotConnInfo) { log.Printf("conn was reused: %t\n", info.Reused) }, + } + traceCtx := httptrace.WithClientTrace(context.Background(), clientTrace) + + // create the http client to send alerts to alertmanager + client, err := config_util.NewClientFromConfig(amCfg.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled()) + if err != nil { + log.Printf("failed to create the http client: %v\n", err) + return + } + + // alerts send loop + var wg sync.WaitGroup + for i := 0; i < maxAlertSendRoutineNumber; 
i++ { + log.Printf("sending alerts with go routine %d\n", i) + wg.Add(1) + go func(index int, client *http.Client, traceCtx context.Context, url string, payload []byte) { + if err := sendOne(client, traceCtx, url, payload); err != nil { + log.Printf("failed to send alerts: %v\n", err) + } + wg.Done() + log.Printf("send routine %d done\n", index) + }(i, client, traceCtx, amUrl, []byte(alerts)) + + //sleep 30 for the HAProxy close the client connection + time.Sleep(asInterval) + } + wg.Wait() +} + +// createAlertmanagerConfig creates and returns the configuration for the target Alertmanager +func createAlertmanagerConfig(amHost, amAccessToken string) *config.AlertmanagerConfig { + return &config.AlertmanagerConfig{ + APIVersion: config.AlertmanagerAPIVersionV2, + PathPrefix: "/", + Scheme: "https", + Timeout: model.Duration(10 * time.Second), + HTTPClientConfig: config_util.HTTPClientConfig{ + Authorization: &config_util.Authorization{ + Type: "Bearer", + Credentials: config_util.Secret(amAccessToken), + }, + TLSConfig: config_util.TLSConfig{ + ServerName: "", + InsecureSkipVerify: true, + }, + }, + ServiceDiscoveryConfigs: discovery.Configs{ + discovery.StaticConfig{ + { + Source: amHost, + }, + }, + }, + } +} + +// send alerts to alertmanager with one http request +func sendOne(c *http.Client, traceCtx context.Context, url string, b []byte) error { + req, err := http.NewRequestWithContext(traceCtx, "POST", url, bytes.NewReader(b)) + if err != nil { + return err + } + req.Header.Set("User-Agent", "testing") + req.Header.Set("Content-Type", "application/json") + + resp, err := c.Do(req) + if err != nil { + return err + } + + defer func() { + /* #nosec */ + io.Copy(ioutil.Discard, resp.Body) + /* #nosec */ + resp.Body.Close() + }() + + // Any HTTP status 2xx is OK. + if resp.StatusCode/100 != 2 { + return errors.Errorf("bad response status %s", resp.Status) + } + + return nil +} diff --git a/tools/simulator/managed-cluster/README.md b/tools/simulator/managed-cluster/README.md new file mode 100644 index 000000000..ead780975 --- /dev/null +++ b/tools/simulator/managed-cluster/README.md @@ -0,0 +1,55 @@ +# Managed Cluster Simulator + +The managed cluster simulator can be used to set up multiple managed clusters and create the corresponding namespaces in the ACM hub cluster, to simulate reconciling thousands of managed clusters for the multicluster-observability-operator. + +## Prereqs + +You must meet the following requirements to setup metrics collector: + +1. ACM 2.1+ available +2. `MultiClusterObservability` instance available in the hub cluster + +## Quick Start + +### Scale down the controllers + +Before creating simulated managed clusters, we should scale down cluster-manager and controllers for managedcluster and manifestwork, to avoid resource conflict with the multicluster-observability-operator. Execute the following command: + +```bash +kubectl -n open-cluster-management scale deploy cluster-manager --replicas 0 +kubectl -n open-cluster-management-hub scale deploy cluster-manager-registration-controller --replicas 0 +kubectl -n open-cluster-management-agent scale deploy klusterlet --replicas 0 +kubectl -n open-cluster-management-agent scale deploy klusterlet-registration-agent --replicas 0 +kubectl -n open-cluster-management-agent scale deploy klusterlet-work-agent --replicas 0 +``` + +> Note: to make sure the controllers are not scaled up again by the operator and OLM, we also need to edit the CSV in the `open-cluster-management` to update the replicas of `cluster-manager` to be `0`. 
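The CSV edit called out in the note above can also be scripted. The following is a rough sketch only, not part of the project's tooling: it assumes `jq` is installed and that the CSV owning the `cluster-manager` deployment lists it under `spec.install.spec.deployments`; verify the CSV and deployment names on your cluster before patching.

```bash
# Hypothetical sketch: pin the cluster-manager deployment to 0 replicas in its owning CSV
# so that OLM does not scale it back up. CSV names and deployment indexes vary by release.
NS=open-cluster-management
for CSV in $(kubectl -n "$NS" get csv -o name); do
  # Find the index of the cluster-manager entry in this CSV (prints "null" if absent).
  IDX=$(kubectl -n "$NS" get "$CSV" -o json \
    | jq '(.spec.install.spec.deployments // []) | map(.name == "cluster-manager") | index(true)')
  if [ "$IDX" != "null" ]; then
    kubectl -n "$NS" patch "$CSV" --type=json \
      -p "[{\"op\":\"add\",\"path\":\"/spec/install/spec/deployments/${IDX}/spec/replicas\",\"value\":0}]"
  fi
done
```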
+
+### Set up managed cluster simulator
+
+You can run `setup-managedcluster.sh` with two numbers (a start index and an end index) to set up multiple simulated managed clusters.
+
+For example, set up simulated managed clusters 1-10 with the following command:
+
+```bash
+# ./setup-managedcluster.sh 1 10
+```
+
+Check that all the simulated managed clusters were created successfully:
+
+```bash
+# kubectl get managedcluster
+NAME                          HUB ACCEPTED   MANAGED CLUSTER URLS                                                   JOINED   AVAILABLE   AGE
+local-cluster                 true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443   True     True        2d2h
+simulated-1-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-2-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-3-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-4-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-5-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-6-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-7-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-8-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-9-managedcluster    true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+simulated-10-managedcluster   true           https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443            Unknown     1m
+```
+
diff --git a/tools/simulator/managed-cluster/setup-managedcluster.sh b/tools/simulator/managed-cluster/setup-managedcluster.sh new file mode 100755 index 000000000..e644778f9 --- /dev/null +++ b/tools/simulator/managed-cluster/setup-managedcluster.sh @@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright (c) 2021 Red Hat, Inc.
+# Copyright Contributors to the Open Cluster Management project
+
+# default kube client is kubectl; use oc if kubectl is not installed
+KUBECLIENT="kubectl"
+
+if ! command -v kubectl &> /dev/null; then
+  if command -v oc &> /dev/null; then
+    KUBECLIENT="oc"
+  else
+    if [[ "$(uname)" == "Linux" ]]; then
+      curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+    elif [[ "$(uname)" == "Darwin" ]]; then
+      curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
+    fi
+    chmod +x ${PWD}/kubectl
+    KUBECLIENT=${PWD}/kubectl
+  fi
+fi
+
+SED_COMMAND='sed -e'
+if [[ "$(uname)" == "Darwin" ]]; then
+  SED_COMMAND='sed -e'
+fi
+
+# temporary working directory
+WORKDIR=$(mktemp -d)
+${KUBECLIENT} get managedcluster local-cluster -o yaml > ${WORKDIR}/simulated-managedcluster.yaml
+
+# create the simulated managed clusters
+for index in $(seq $1 $2)
+do
+  echo "Creating simulated ManagedCluster simulated-${index}-managedcluster..."
+ ${KUBECLIENT} create ns simulated-${index}-managedcluster --dry-run -o yaml | ${KUBECLIENT} apply -f - + ${SED_COMMAND} "s~local-cluster~simulated-${index}-managedcluster~" ${WORKDIR}/simulated-managedcluster.yaml | ${KUBECLIENT} apply -f - +done + diff --git a/tools/simulator/metrics-collector/Dockerfile b/tools/simulator/metrics-collector/Dockerfile new file mode 100644 index 000000000..887495101 --- /dev/null +++ b/tools/simulator/metrics-collector/Dockerfile @@ -0,0 +1,3 @@ +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +COPY timeseries.txt /tmp/ diff --git a/tools/simulator/metrics-collector/Makefile b/tools/simulator/metrics-collector/Makefile new file mode 100644 index 000000000..5ef54b5d2 --- /dev/null +++ b/tools/simulator/metrics-collector/Makefile @@ -0,0 +1,43 @@ +# Copyright Contributors to the Open Cluster Management project + +METRICS_IMAGE?=quay.io/ocm-observability/metrics-data:2.4.0 +METRICS_JSON=./_output/metrics.json +BIN_DIR?=$(shell pwd)/_output/bin +GOJSONTOYAML_BIN=$(BIN_DIR)/gojsontoyaml + +export PATH := $(BIN_DIR):$(PATH) + +all: timeseries build push + +timeseries: $(METRICS_JSON) + oc port-forward -n openshift-monitoring prometheus-k8s-0 9090 > /dev/null & \ + sleep 50 ; \ + query="curl --fail --silent -G http://localhost:9090/federate"; \ + for rule in $$(cat $(METRICS_JSON) | jq -r '.[]'); do \ + query="$$query $$(printf -- "--data-urlencode match[]=%s" $$rule)"; \ + done; \ + echo '# This file was generated using `make $@`.' > $@.txt ; \ + $$query >> $@.txt ; \ + jobs -p | xargs -r kill + +$(METRICS_JSON): $(GOJSONTOYAML_BIN) + matches=`curl -L https://raw.githubusercontent.com/stolostron/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml | \ + $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.data."metrics_list.yaml"' | $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"'`; \ + names=`curl -L https://raw.githubusercontent.com/stolostron/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml | \ + $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.data."metrics_list.yaml"' | $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"'`; \ + echo $$matches $$names | jq -s . > $@ + +$(GOJSONTOYAML_BIN): $(BIN_DIR) + GOBIN=$(BIN_DIR) go get github.com/brancz/gojsontoyaml + +$(BIN_DIR): + mkdir -p $@ + +build: + docker build -t $(METRICS_IMAGE) . + +push: + docker push $(METRICS_IMAGE) + +clean: + rm -r _output && rm timeseries.txt diff --git a/tools/simulator/metrics-collector/README.md b/tools/simulator/metrics-collector/README.md new file mode 100644 index 000000000..5132bb540 --- /dev/null +++ b/tools/simulator/metrics-collector/README.md @@ -0,0 +1,53 @@ +# Metrics Collector Simulator + +Metrics collector simulator can be used to setup multiple metrics collector in different namespaces in one managed cluster, to simulate thousands of managed clusters push metrics to ACM hub cluster for scale testing. + +_Note:_ this simulator is for testing purpose only. 
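For reference, the `timeseries.txt` that the Makefile above bakes into the metrics data image is simply a snapshot from the hub Prometheus federate endpoint. Below is a minimal hand-rolled sketch of capturing one; it assumes `oc` access to the `openshift-monitoring` namespace, and the two `match[]` selectors are illustrative placeholders for the selectors the Makefile derives from the metrics allowlist.

```bash
# Port-forward the in-cluster Prometheus, then take a federate snapshot.
oc -n openshift-monitoring port-forward prometheus-k8s-0 9090 >/dev/null &
PF_PID=$!
sleep 5   # give the port-forward a moment to establish
curl -G --fail --silent http://localhost:9090/federate \
  --data-urlencode 'match[]={__name__="node_cpu_seconds_total"}' \
  --data-urlencode 'match[]={__name__="container_network_receive_bytes_total"}' \
  > timeseries.txt
kill $PF_PID
```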
+
+## Prereqs
+You must meet the following requirements to set up the metrics collectors:
+
+- ACM 2.1+ available
+- A `MultiClusterObservability` instance available, with the following pods in the `open-cluster-management-addon-observability` namespace:
+
+  ```
+  $ oc get po -n open-cluster-management-addon-observability
+  NAME                                               READY   STATUS    RESTARTS   AGE
+  endpoint-observability-operator-7f8f949bc8-trwzh   2/2     Running   0          118m
+  metrics-collector-deployment-74cbf5896f-jhg6v      1/1     Running   0          111m
+  ```
+
+## Quick Start
+### Set up metrics collectors
+You can run `setup-metrics-collector.sh` with a number to set up multiple metrics collectors.
+
+For example, set up 2 metrics collectors with 100 workers each with the following command:
+```
+# ./setup-metrics-collector.sh 2 100
+```
+Check that all the metrics collectors are running successfully in your cluster:
+```
+# oc get pods --all-namespaces | grep simulate-managed-cluster
+simulate-managed-cluster1    metrics-collector-deployment-7d69d9f897-xn8vz   1/1   Running   0   22h
+simulate-managed-cluster2    metrics-collector-deployment-67844bfc59-lwchn   1/1   Running   0   22h
+```
+This simulates 200 metrics collectors pushing data into the hub Thanos.
+
+> Note: if you want the simulated metrics collectors to be scheduled to master nodes, so that more of them can be deployed, set the environment variable `ALLOW_SCHEDULED_TO_MASTER` to `true` before executing the setup script.
+
+### Clean up metrics collectors
+Use `clean-metrics-collector.sh` to remove all the metrics collectors you created.
+```
+# ./clean-metrics-collector.sh 10
+```
+
+## Generate your own metrics data source
+By default, `setup-metrics-collector.sh` uses the metrics data image defined in the `METRICS_IMAGE` environment variable as its data source. You can build and push your own metrics data image with the following command:
+```
+# METRICS_IMAGE=<your-metrics-data-image> make all
+```
+## Set up metrics collectors with your own metrics data source
+Run the command below to set up metrics collectors with your own data source:
+```
+# METRICS_IMAGE=<your-metrics-data-image> ./setup-metrics-collector.sh 10
+```
diff --git a/tools/simulator/metrics-collector/clean-metrics-collector.sh b/tools/simulator/metrics-collector/clean-metrics-collector.sh new file mode 100755 index 000000000..79833ed37 --- /dev/null +++ b/tools/simulator/metrics-collector/clean-metrics-collector.sh @@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright (c) 2021 Red Hat, Inc.
+# Copyright Contributors to the Open Cluster Management project
+
+sed_command='sed -i'
+managed_cluster='managed'
+if [ $# -eq 2 ]; then
+  managed_cluster=$2
+fi
+
+if [ $# -lt 1 ]; then
+  echo "this script must be run with the number of clusters:"
+  echo -e "\n$0 total_clusters\n"
+  exit 1
+fi
+
+re='^[0-9]+$'
+if !
[[ $1 =~ $re ]] ; then + echo "error: arguments <$1> not a number" >&2; exit 1 +fi + +for i in $(seq 1 $1) +do + cluster_name=simulate-${managed_cluster}-cluster${i} + kubectl delete deploy -n ${cluster_name} metrics-collector-deployment + kubectl delete clusterrolebinding ${cluster_name}-clusters-metrics-collector-view + kubectl delete -n ${cluster_name} secret/observability-managed-cluster-certs + kubectl delete ns ${cluster_name} +done diff --git a/tools/simulator/metrics-collector/metrics-collector-view.yaml b/tools/simulator/metrics-collector/metrics-collector-view.yaml new file mode 100644 index 000000000..c28cf7314 --- /dev/null +++ b/tools/simulator/metrics-collector/metrics-collector-view.yaml @@ -0,0 +1,14 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __CLUSTER_NAME__-clusters-metrics-collector-view + annotations: + owner: multicluster-operator +subjects: + - kind: ServiceAccount + name: endpoint-observability-operator-sa + namespace: __CLUSTER_NAME__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-monitoring-view \ No newline at end of file diff --git a/tools/simulator/metrics-collector/setup-metrics-collector.sh b/tools/simulator/metrics-collector/setup-metrics-collector.sh new file mode 100755 index 000000000..f683e9d0b --- /dev/null +++ b/tools/simulator/metrics-collector/setup-metrics-collector.sh @@ -0,0 +1,97 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +METRICS_IMAGE="${METRICS_IMAGE:-quay.io/ocm-observability/metrics-data:2.4.0}" +WORKDIR="$(pwd -P)" +export PATH=${PATH}:${WORKDIR} + +if ! command -v jq &> /dev/null; then + if [[ "$(uname)" == "Linux" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + elif [[ "$(uname)" == "Darwin" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 + fi + chmod +x ./jq +fi + +sed_command='sed -i' +if [[ "$(uname)" == "Darwin" ]]; then + sed_command='sed -i -e' +fi + +managed_cluster='managed' +if [ $# -eq 3 ]; then + managed_cluster=$3 +fi + +if [ $# -lt 1 ]; then + echo "this script must be run with the number of metrics-collector:" + echo -e "\n$0 total_collectors\n" + exit 1 +fi + +re='^[0-9]+$' +if ! 
[[ $1 =~ $re ]] ; then + echo "error: arguments <$1> not a number" >&2; exit 1 +fi + +workers=1 +if [ $# -gt 2 ]; then + workers=$2 +fi + +for i in $(seq 1 $1) +do + cluster_name=simulate-${managed_cluster}-cluster${i} + kubectl create ns ${cluster_name} + + # create ca/sa/rolebinding for metrics collector + kubectl get configmap metrics-collector-serving-certs-ca-bundle -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get secret observability-managed-cluster-certs -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get sa endpoint-observability-operator-sa -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl -n ${cluster_name} patch secret observability-managed-cluster-certs --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + kubectl -n ${cluster_name} patch sa endpoint-observability-operator-sa --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + + # deploy metrics collector deployment to cluster ns + deploy_yaml_file=${cluster_name}-metrics-collector-deployment.json + kubectl get deploy metrics-collector-deployment -n open-cluster-management-addon-observability -o json > $deploy_yaml_file + + # replace namespace, cluster and clusterID. Insert --simulated-timeseries-file + uuid=$(cat /proc/sys/kernel/random/uuid) + jq \ + --arg cluster_name $cluster_name \ + --arg cluster "--label=\"cluster=$cluster_name\"" \ + --arg clusterID "--label=\"clusterID=$uuid\"" \ + --arg workerNum "--worker-number=$workers" \ + --arg file "--simulated-timeseries-file=/metrics-volume/timeseries.txt" \ + '.metadata.namespace=$cluster_name | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $cluster |.spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $clusterID | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $file | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . 
+ $workerNum' $deploy_yaml_file > $deploy_yaml_file.tmp && mv $deploy_yaml_file.tmp $deploy_yaml_file + + # insert metrics initContainer + jq \ + --argjson init '{"initContainers": [{"command":["sh","-c","cp /tmp/timeseries.txt /metrics-volume"],"image":"'$METRICS_IMAGE'","imagePullPolicy":"Always","name":"init-metrics","volumeMounts":[{"mountPath":"/metrics-volume","name":"metrics-volume"}]}]}' \ + --argjson emptydir '{"emptyDir": {}, "name": "metrics-volume"}' \ + --argjson metricsdir '{"mountPath": "/metrics-volume","name": "metrics-volume"}' \ + '.spec.template.spec += $init | .spec.template.spec.volumes += [$emptydir] | .spec.template.spec.containers[0].volumeMounts += [$metricsdir]' $deploy_yaml_file > $deploy_yaml_file.tmp && mv $deploy_yaml_file.tmp $deploy_yaml_file + + if [ "$ALLOW_SCHEDULED_TO_MASTER" == "true" ]; then + # insert tolerations + jq \ + --argjson tolerations '{"tolerations": [{"key":"node-role.kubernetes.io/master","operator":"Exists","effect":"NoSchedule"}]}' \ + '.spec.template.spec += $tolerations' $deploy_yaml_file > $deploy_yaml_file.tmp && mv $deploy_yaml_file.tmp $deploy_yaml_file + fi + + cat "$deploy_yaml_file" | kubectl -n ${cluster_name} apply -f - + rm -rf "$deploy_yaml_file" "$deploy_yaml_file".tmp + kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' + + # deploy ClusterRoleBinding for read metrics from OCP prometheus + rolebinding_yaml_file=${cluster_name}-metrics-collector-view.yaml + cp -rf metrics-collector-view.yaml "$rolebinding_yaml_file" + $sed_command "s~__CLUSTER_NAME__~${cluster_name}~g" "$rolebinding_yaml_file" + cat "$rolebinding_yaml_file" | kubectl -n ${cluster_name} apply -f - + rm -rf "$rolebinding_yaml_file" + +done diff --git a/tools/switch-to-grafana-admin.sh b/tools/switch-to-grafana-admin.sh new file mode 100755 index 000000000..d425e177f --- /dev/null +++ b/tools/switch-to-grafana-admin.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +obs_namespace='open-cluster-management-observability' + +if command -v python &> /dev/null +then + PYTHON_CMD="python" +elif command -v python2 &> /dev/null +then + PYTHON_CMD="python2" +elif command -v python3 &> /dev/null +then + PYTHON_CMD="python3" +else + echo "Failed to found python command, please install firstly" + exit 1 +fi + +usage() { + cat </dev/null` + if [ $? -ne 0 ]; then + echo "Failed to fetch user ID, please check your user name" + exit 1 + fi + + orgID=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/users/lookup?loginOrEmail=$username_no_num_sign | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['orgId'])" 2>/dev/null` + if [ $? -ne 0 ]; then + echo "Failed to fetch organization ID, please check your user name" + exit 1 + fi + + $curlCMD -s -X DELETE -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/orgs/$orgID/users/$userID > /dev/null + if [ $? 
-ne 0 ]; then + echo "Failed to delete user <$user_name>" + exit 1 + fi + + $curlCMD -s -X POST -H "Content-Type: application/json" -d "{\"loginOrEmail\":\"$user_name\", \"role\": \"Admin\"}" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/orgs/$orgID/users > /dev/null + if [ $? -ne 0 ]; then + echo "Failed to switch the user <$user_name> to be grafana admin" + exit 1 + fi + echo "User <$user_name> switched to be grafana admin" + + # disable getting start +# kubectl exec -it -n "$obs_namespace" $podName -c grafana-dev -- sqlite3 /var/lib/grafana/grafana.db "update user set help_flags1=1 where id=$userID;" > /dev/null +# if [ $? -ne 0 ]; then +# echo "Failed to disable getting start for the user <$user_name>" +# exit 1 +# fi + +} + +start "$@" From 5b27b831a7bee0795a74ae26efde314b887ac92b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 17 Jan 2022 16:09:34 +0800 Subject: [PATCH 003/150] update cert name to compliance QE customized setting Signed-off-by: Chang Liang Qu --- .DS_Store | Bin 0 -> 6148 bytes tests/pkg/utils/mco_router_ca.go | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Tue, 18 Jan 2022 20:47:33 +0800 Subject: [PATCH 004/150] update Polarion ID Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 8 +- tests/pkg/tests/observability_alert_test.go | 20 +-- tests/pkg/tests/results.xml | 135 +++++++++++++++++ tests/pkg/tests/results.xml.addon | 152 ++++++++++++++++++++ 4 files changed, 301 insertions(+), 14 deletions(-) create mode 100644 tests/pkg/tests/results.xml create mode 100644 tests/pkg/tests/results.xml.addon diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index b95f2288e..31df02746 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -14,7 +14,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -37,7 +37,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("[P2][Sev2][Observability] Verify monitoring operator and deployment status when metrics collection disabled (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability] (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -119,7 +119,7 @@ var _ = Describe("Observability:", func() { }) }) - It("[P3][Sev3][Observability][Stable] Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable] (addon/g0)", func() { By("Set 
interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) @@ -143,7 +143,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - Context("[P2][Sev2][Observability] Disable the Observability by updating managed cluster label (addon/g0) -", func() { + Context("RHACM4K-1235: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g0) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index b214f874b..4b5f1da53 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -28,7 +28,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -50,7 +50,7 @@ var _ = Describe("Observability:", func() { } secret := "alertmanager-config" - It("@BVT - [P1][Sev1][Observability][Stable] Verify alert is created and received - Should have the expected statefulsets (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{LabelSelector: label}) @@ -71,7 +71,7 @@ var _ = Describe("Observability:", func() { } }) - It("[P2][Sev2][Observability][Stable] Verify alert is created and received - Should have the expected configmap (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable] (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -80,7 +80,7 @@ var _ = Describe("Observability:", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("[P3][Sev3][Observability][Stable] Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable] (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -93,7 +93,7 @@ var _ = Describe("Observability:", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("@BVT - [P1][Sev1][Observability][Stable] Verify alert is created and received - Should have the expected secret (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, 
err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("Observability:", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("@BVT - [P1][Sev1][Observability][Stable] Verify alert is created and received - Should have the alertmanager configured in rule (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -129,7 +129,7 @@ var _ = Describe("Observability:", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("[P2][Sev2][Observability][Stable] Verify alert is created and received - Should have custom alert generated (alert/g0)", func() { + It("[RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable] (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -188,7 +188,7 @@ var _ = Describe("Observability:", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("[P2][Sev2][Observability][Stable] Updated alert rule can take effect automatically - Should have custom alert updated (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable] (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_invalid"}) @@ -208,7 +208,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("[P2][Sev2][Observability][Stable] Updated alert rule can take effect automatically - delete the customized rules (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable] (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -247,7 +247,7 @@ var _ = Describe("Observability:", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("[P2][Sev2][Observability][Integration] Should have alert named Watchdog forwarded to alertmanager (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration] (alertforward/g0)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/results.xml b/tests/pkg/tests/results.xml new file mode 100644 index 000000000..21124cadc --- /dev/null +++ b/tests/pkg/tests/results.xml @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/pkg/tests/results.xml.addon b/tests/pkg/tests/results.xml.addon new file mode 100644 index 000000000..a25fb7dcb --- /dev/null +++ b/tests/pkg/tests/results.xml.addon @@ -0,0 +1,152 @@ + + + + + + + + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:41 Unexpected error: <*errors.errorString | 0xc00080d300>: { s: "the MCO CR did not have observabilityAddonSpec.resources spec configed", } the MCO CR did not have observabilityAddonSpec.resources spec configed occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:44 + �[1mSTEP�[0m: Check addon resource requirement + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:53 Timed out after 300.002s. Expected success, but got an error: <*errors.errorString | 0xc000310710>: { s: "metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}>", } metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:57 + �[1mSTEP�[0m: Check metrics-collector resource requirement + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:60 Timed out after 1200.005s. Expected success, but got an error: <*errors.StatusError | 0xc0002d2280>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "observabilityaddons.observability.open-cluster-management.io \"observability-addon\" not found", Reason: "NotFound", Details: { Name: "observability-addon", Group: "observability.open-cluster-management.io", Kind: "observabilityaddons", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } observabilityaddons.observability.open-cluster-management.io "observability-addon" not found /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:80 + �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:97 Timed out after 1200.005s. 
Expected success, but got an error: <*errors.StatusError | 0xc000988140>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "observabilityaddons.observability.open-cluster-management.io \"observability-addon\" not found", Reason: "NotFound", Details: { Name: "observability-addon", Group: "observability.open-cluster-management.io", Kind: "observabilityaddons", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } observabilityaddons.observability.open-cluster-management.io "observability-addon" not found /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:118 + �[1mSTEP�[0m: Waiting for MCO addon components ready �[1mSTEP�[0m: Checking the status in managedclusteraddon reflects the endpoint operator status correctly + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:147 Timed out after 300.002s. Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:159 + �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file From 802811654994ed74e0e90a7596e6d76839199dac Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 19 Jan 2022 11:02:25 +0800 Subject: [PATCH 005/150] update managed cluster parameters info in jenkins Signed-off-by: Chang Liang Qu --- Jenkinsfile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 63ef4ad86..def3f2e34 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -64,9 +64,11 @@ pipeline { echo "Aborting test.. 
OCP HUB details are required for the test execution" exit 1 else - oc login --insecure-skip-tls-verify -u \$MANAGED_CLUSTER_USER -p \$MANAGED_CLUSTER_PASS \$MANAGED_CLUSTER_API_URL - oc config view --minify --raw=true > ~/.kube/managed_kubeconfig - export MAKUBECONFIG=~/.kube/managed_kubeconfig + if [[ -n "${params.AWS_ACCESS_KEY_ID}" || -n "${params.AWS_ACCESS_KEY_ID}" || -n "${params.MANAGED_CLUSTER_API_URL}" ]]; then + oc login --insecure-skip-tls-verify -u \$MANAGED_CLUSTER_USER -p \$MANAGED_CLUSTER_PASS \$MANAGED_CLUSTER_API_URL + oc config view --minify --raw=true > ~/.kube/managed_kubeconfig + export MAKUBECONFIG=~/.kube/managed_kubeconfig + fi oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL export KUBECONFIG=~/.kube/config go mod vendor && ginkgo build ./tests/pkg/tests/ From 0cebad3813491c00375e822241a1c1261e741faf Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 20 Jan 2022 14:46:49 +0800 Subject: [PATCH 006/150] add Polarion ID for all test cases Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 2 +- .../pkg/tests/observability_certrenew_test.go | 4 +- tests/pkg/tests/observability_config_test.go | 10 +- .../pkg/tests/observability_dashboard_test.go | 8 +- .../observability_endpoint_preserve_test.go | 8 +- .../tests/observability_grafana_dev_test.go | 4 +- tests/pkg/tests/observability_grafana_test.go | 4 +- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 10 +- ...servability_observatorium_preserve_test.go | 4 +- .../pkg/tests/observability_reconcile_test.go | 12 +- .../pkg/tests/observability_retention_test.go | 12 +- tests/pkg/tests/observability_route_test.go | 2 +- tests/pkg/tests/results.xml | 124 ++---------------- 14 files changed, 52 insertions(+), 154 deletions(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 4b5f1da53..f6f94e33f 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -129,7 +129,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("[RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable] (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index afa6baf04..1edddec51 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -14,7 +14,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -27,7 +27,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - It("[P1][Sev1][Observability][Integration] Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability 
Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration] (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 01d0e284e..add2e53e8 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -17,7 +17,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -30,7 +30,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - It("@BVT - [P1][Sev1][Observability][Stable] Verify metrics data global setting on the managed cluster (config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable](config/g0)", func() { if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") } @@ -43,7 +43,7 @@ var _ = Describe("Observability:", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - It("@BVT - [P1][Sev1][Observability][Stable] Verify MCO CR storage class and PVC (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable] (config/g0)", func() { if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") } @@ -143,7 +143,7 @@ var _ = Describe("Observability:", func() { }, } - It("@BVT - [P1][Sev1][Observability][Integration] Verify the replica in advanced config for Observability components (config/g0)", func() { + It("RHACM4K-2822: Observability: Verify the replica in advanced config for Observability components @BVT - [P1][Sev1][Observability][Integration] (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { @@ -179,7 +179,7 @@ var _ = Describe("Observability:", func() { } }) - It("[P2][Sev2][Observability][Integration] Persist advance values in MCO CR - Checking resources in advanced config (config/g0)", func() { + It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 380961a40..faf9ed5d3 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -17,7 +17,7 @@ const ( updateDashboardTitle = "Update Sample Dashboard for E2E" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -30,7 +30,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - It("[P2][Sev2][Observability][Stable] Verify new customized 
Grafana dashboard - Should have custom dashboard which defined in configmap (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}) Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) @@ -40,7 +40,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("[P2][Sev2][Observability][Stable] Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}) Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) @@ -54,7 +54,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("[P2][Sev2][Observability][Stable] Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index bc7bfc9ef..bba4346c9 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -16,7 +16,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -29,7 +29,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][Observability] Verify metrics collector is prevent to be configured manually (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability] (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -81,7 +81,7 @@ var _ = Describe("Observability:", func() { }) }) - It("[P2][Sev2][Observability][Stable] Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured 
manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable] (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -119,7 +119,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("[P2][Sev2][Observability][Stable] Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable] (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 5562e423f..d6f3c11dc 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -14,11 +14,11 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("[P1][Sev1][Observability][Integration] Setup a Grafana develop instance (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration] (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index a8f136ceb..ce20b79a2 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -12,7 +12,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -25,7 +25,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - It("@BVT - [P1][Sev1][Observability][Stable] Verify Grafana - Should have metric data in grafana console (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable] (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 7af3d5093..a0c941e3d 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -14,7 +14,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, diff --git a/tests/pkg/tests/observability_metrics_test.go 
b/tests/pkg/tests/observability_metrics_test.go index d9daa8ff6..e5d8d453d 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -25,7 +25,7 @@ var ( metricslistError error ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -48,7 +48,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Integration] Customized metrics data are collected (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration] (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -67,7 +67,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Integration] Metrics removal from default allowlist (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -82,7 +82,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("[P2][Sev2][Observability][Integration] Metrics removal from default allowlist (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -97,7 +97,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("[P2][Sev2][Observability][Integration] Metrics removal from default allowlist (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Delete(context.TODO(), allowlistCMname, metav1.DeleteOptions{}) diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index a726575c1..5b30662a7 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -14,7 +14,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -27,7 +27,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - Context("[P1][Sev1][Observability] Verify Observatorium CR configuration compliance (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR 
configuration compliance [P1][Sev1][Observability] (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index e263cb26c..bf6bd6af8 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -26,7 +26,7 @@ var ( err error ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( @@ -40,7 +40,7 @@ var _ = Describe("Observability:", func() { testOptions.HubCluster.KubeContext) }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { By("Modifying MCO CR for reconciling") err := utils.ModifyMCOCR(testOptions) Expect(err).ToNot(HaveOccurred()) @@ -96,7 +96,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Verify nodeSelector setting effects for Observability components (reconcile/g0)", func() { + It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { By("Checking node selector spec in MCO CR") mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -116,7 +116,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Check affinity rule takes effect on Observability components (reconcile/g0)", func() { + It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { By("Checking podAntiAffinity for all pods") Eventually(func() error { err := utils.CheckAllPodsAffinity(testOptions) @@ -127,7 +127,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Customize the Observability components storage size (reconcile/g0)", func() { + It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { By("Resizing alertmanager storage") alertmans, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: ALERTMANAGER_LABEL, @@ -143,7 +143,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Revert MCO CR changes (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { advRetentionCon, err := utils.CheckAdvRetentionConfig(testOptions) if !advRetentionCon { Skip("Skip the case since " 
+ err.Error()) diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index 917b78395..b19b64891 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -17,7 +17,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { var ( deleteDelay = "48h" @@ -59,7 +59,7 @@ var _ = Describe("Observability:", func() { } }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check compact args (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] (retention/g0):", func() { By("--delete-delay=" + deleteDelay) Eventually(func() error { compacts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -78,7 +78,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check store args (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] (retention/g0):", func() { By("--ignore-deletion-marks-delay=" + ignoreDeletionMarksDelay) Eventually(func() error { stores, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -97,7 +97,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check receive args (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { receives, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -116,7 +116,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check rule args (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -135,7 +135,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Check and tune backup retention settings in MCO CR - Check rule args (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] (retention/g0):", func() { By("--tsdb.block-duration=" + blockDuration) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ diff --git 
a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 3e6863801..8e11271ca 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -24,7 +24,7 @@ var ( alertCreated bool = false ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, diff --git a/tests/pkg/tests/results.xml b/tests/pkg/tests/results.xml index 21124cadc..e797eee66 100644 --- a/tests/pkg/tests/results.xml +++ b/tests/pkg/tests/results.xml @@ -1,135 +1,33 @@ \ No newline at end of file From 96ebc2a244539ab95877669849325c6bdddb8eb6 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 23 Jan 2022 09:14:48 +0800 Subject: [PATCH 007/150] update jenkins file to make manged parameters are optional Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index def3f2e34..840224ea7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -64,7 +64,7 @@ pipeline { echo "Aborting test.. OCP HUB details are required for the test execution" exit 1 else - if [[ -n "${params.AWS_ACCESS_KEY_ID}" || -n "${params.AWS_ACCESS_KEY_ID}" || -n "${params.MANAGED_CLUSTER_API_URL}" ]]; then + if [[ -n "${params.MANAGED_CLUSTER_USER}" || -n "${params.MANAGED_CLUSTER_PASS}" || -n "${params.MANAGED_CLUSTER_API_URL}" ]]; then oc login --insecure-skip-tls-verify -u \$MANAGED_CLUSTER_USER -p \$MANAGED_CLUSTER_PASS \$MANAGED_CLUSTER_API_URL oc config view --minify --raw=true > ~/.kube/managed_kubeconfig export MAKUBECONFIG=~/.kube/managed_kubeconfig From cee445ebac759a49d0556166d3ddd2377db205a5 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 23 Jan 2022 09:55:13 +0800 Subject: [PATCH 008/150] update operator in Jenkins file Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 840224ea7..735c80ee7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -64,7 +64,7 @@ pipeline { echo "Aborting test..
OCP HUB details are required for the test execution" exit 1 else - if [[ -n "${params.MANAGED_CLUSTER_USER}" || -n "${params.MANAGED_CLUSTER_PASS}" || -n "${params.MANAGED_CLUSTER_API_URL}" ]]; then + if [[ -n "${params.MANAGED_CLUSTER_USER}" && -n "${params.MANAGED_CLUSTER_PASS}" && -n "${params.MANAGED_CLUSTER_API_URL}" ]]; then oc login --insecure-skip-tls-verify -u \$MANAGED_CLUSTER_USER -p \$MANAGED_CLUSTER_PASS \$MANAGED_CLUSTER_API_URL oc config view --minify --raw=true > ~/.kube/managed_kubeconfig export MAKUBECONFIG=~/.kube/managed_kubeconfig From 9bccdf5da67cb7a9c1262f72bf631836f73ec40a Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 24 Jan 2022 14:37:18 +0800 Subject: [PATCH 009/150] update polarion ID Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 44 +++++++++---------- tests/pkg/tests/observability_metrics_test.go | 2 +- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 31df02746..1286aecc0 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -93,30 +93,30 @@ var _ = Describe("", func() { return fmt.Errorf("Check no metric data in grafana console error: %v", err) }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) }) + }) - It("[Stable] Modifying MCO cr to enable observabilityaddon", func() { - Eventually(func() error { - return utils.ModifyMCOAddonSpecMetrics(testOptions, true) - }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { + Eventually(func() error { + return utils.ModifyMCOAddonSpecMetrics(testOptions, true) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) - By("Waiting for MCO addon components ready") - Eventually(func() bool { - err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") - if len(podList.Items) == 1 && err == nil { - return true - } - return false - }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) + By("Waiting for MCO addon components ready") + Eventually(func() bool { + err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if len(podList.Items) == 1 && err == nil { + return true + } + return false + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) - By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") - Eventually(func() error { - err = utils.CheckAllOBAsEnabled(testOptions) - if err != nil { - return err - } - return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) - }) + By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable] (addon/g0)", func() { @@ -143,7 +143,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, 
EventuallyIntervalSecond*1).Should(BeTrue()) }) - Context("RHACM4K-1235: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g0) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g0) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index e5d8d453d..427f9eca6 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -118,7 +118,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("[P2][Sev2][Observability][Integration] Should have metrics which used grafana dashboard (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration] (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() for _, name := range metricList { From 392bb009ca481222f94e30a26bef80f8caa0b640 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 27 Jan 2022 16:14:57 +0800 Subject: [PATCH 010/150] add condition for there's no managedcluster in options.xml Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 1286aecc0..00a6d6947 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -143,20 +143,23 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g0) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) - By("Waiting for MCO addon components scales to 0") - Eventually(func() bool { - err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) - if err == nil && obaNS == nil { - return true - } - return false - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + klog.V(1).Infof("managedcluster number is <%d>", len(testOptions.ManagedClusters)) + if len(testOptions.ManagedClusters) > 0 { + By("Waiting for MCO addon components scales to 0") + Eventually(func() bool { + err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) + if err == nil && obaNS == nil { + return true + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + } }) It("[Stable] Remove disable observability label from the managed cluster", func() { From 97963c94224b45ae3497e9156a0acf33f2290c57 Mon Sep 17 00:00:00 2001 From: Chang Liang 
Qu Date: Fri, 28 Jan 2022 22:24:01 +0800 Subject: [PATCH 011/150] rmove sensitive info according to gitleak, which not impact automation Signed-off-by: Chang Liang Qu --- docs/setup-ceph-for-object-storage.md | 6 +++--- operators/pkg/deploying/deployer_test.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/setup-ceph-for-object-storage.md b/docs/setup-ceph-for-object-storage.md index 67d59d074..630563be7 100644 --- a/docs/setup-ceph-for-object-storage.md +++ b/docs/setup-ceph-for-object-storage.md @@ -140,7 +140,7 @@ CDDQ0YU1C4A77A0GE54S ``` $ SECRET_KEY=$(oc -n rook-ceph get secret rook-ceph-object-user-object-object -o yaml | grep SecretKey | awk '{print $2}' | base64 --decode) $ echo $SECRET_KEY -awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI + ``` ### Expose Object Store externally @@ -168,7 +168,7 @@ AWS_ENDPOINT: `oc get service rook-ceph-rgw-object -n rook-ceph` and use `CLUSTE ``` [root@rook-ceph-tools /]# export AWS_ACCESS_KEY_ID=CDDQ0YU1C4A77A0GE54S -[root@rook-ceph-tools /]# export AWS_SECRET_ACCESS_KEY=awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI +[root@rook-ceph-tools /]# export AWS_SECRET_ACCESS_KEY= [root@rook-ceph-tools /]# export AWS_HOST=rook-ceph-rgw-object:8081 [root@rook-ceph-tools /]# export AWS_ENDPOINT=172.30.162.20:8081 ``` @@ -195,7 +195,7 @@ config: endpoint: rook-ceph-rgw-object-rook-ceph.apps.acm-hub.dev05.red-chesterfield.com insecure: true access_key: CDDQ0YU1C4A77A0GE54S - secret_key: awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI + secret_key: ``` ### Proceed with installation of ACM Observbility diff --git a/operators/pkg/deploying/deployer_test.go b/operators/pkg/deploying/deployer_test.go index 415d4d46e..1f975b5ae 100644 --- a/operators/pkg/deploying/deployer_test.go +++ b/operators/pkg/deploying/deployer_test.go @@ -273,7 +273,7 @@ func TestDeploy(t *testing.T) { }, Data: map[string][]byte{ "username": []byte("YWRtaW4="), - "password": []byte("MWYyZDFlMmU2N2Rm"), + "password": []byte(""), }, }, validateResults: func(client client.Client) { From 361aa2c6347588b2e0c059be5b2172ec49948ac0 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 27 Mar 2022 10:04:09 +0800 Subject: [PATCH 012/150] update main for release25 Signed-off-by: Chang Liang Qu --- CONTRIBUTING.md | 2 +- Jenkinsfile_upgrade | 79 - Makefile | 1 + README.md | 181 +- REMEDIATE.md | 1 + cicd-scripts/customize-mco.sh | 2 +- cicd-scripts/run-e2e-tests.sh | 3 + cicd-scripts/setup-e2e-tests.sh | 17 +- .../metrics/cmd/metrics-collector/main.go | 237 +- .../metrics/pkg/collectrule/evaluator.go | 362 + .../metrics/pkg/collectrule/evaluator_test.go | 265 + collectors/metrics/pkg/forwarder/forwarder.go | 105 +- collectors/metrics/pkg/metricfamily/label.go | 4 +- .../metrics/pkg/metricfamily/required.go | 7 +- .../pkg/metricsclient/metricsclient.go | 13 +- .../integration/manifests/deployment.yaml | 2 +- .../manifests/observatorium-api.yaml | 2 +- docs/MultiClusterObservability-CRD.md | 4 +- docs/setup-ceph-for-object-storage.md | 6 +- .../alerts/watchdog_rule/kustomization.yaml | 2 + .../alerts/watchdog_rule/watchdog-rule.yaml | 22 + .../dashboards/alert-dashboards/README.md | 11 + .../alert-dashboards/alert-analysis.yaml | 743 ++ .../alert-dashboards/alerts-by-cluster.yaml | 178 + .../alert-dashboards/clusters-by-alert.yaml | 180 + .../alert-dashboards/kustomization.yaml | 6 + examples/gen.go | 119 + ...servability-v1beta1-to-v1beta2-golden.yaml | 2 +- .../v1beta2/custom-certs/kustomization.yaml | 2 + .../v1beta2/custom-certs/observability.yaml | 126 + 
examples/mco/e2e/v1beta2/observability.yaml | 10 + examples/minio-tls/certs/ca.crt | 18 + examples/minio-tls/certs/private.key | 27 + examples/minio-tls/certs/public.crt | 18 + examples/minio-tls/kustomization.yaml | 6 + examples/minio-tls/minio-deployment.yaml | 53 + examples/minio-tls/minio-pvc.yaml | 14 + examples/minio-tls/minio-service.yaml | 13 + examples/minio-tls/minio-tls-secret.yaml | 10 + examples/minio-tls/thanos-object-storage.yaml | 21 + examples/minio/minio-secret.yaml | 11 +- go.mod | 37 +- go.sum | 384 +- .../pkg/controller/dashboard_controller.go | 29 +- operators/endpointmetrics/README.md | 3 +- .../config/manager/manager.yaml | 2 +- .../observabilityendpoint/match_evaluator.go | 35 + .../match_evaluator_test.go | 87 + .../metrics_collector.go | 98 +- .../metrics_collector_test.go | 15 +- .../observabilityaddon_controller.go | 24 +- .../ocp_monitoring_config.go | 57 +- operators/endpointmetrics/main.go | 2 + .../crd/alertmanager_crd_0_53_1.yaml | 6043 ++++++++++++ .../crd/alertmanagerconfig_crd_0_53_1.yaml | 2846 ++++++ .../prometheus/crd/kustomization.yaml | 11 + .../prometheus/crd/podmonitor_crd_0_53_1.yaml | 580 ++ .../prometheus/crd/probe_crd_0_53_1.yaml | 610 ++ .../prometheus/crd/prometheus_crd_0_53_1.yaml | 8304 +++++++++++++++++ .../crd/prometheusrule_crd_0_53_1.yaml | 100 + .../crd/servicemonitor_crd_0_53_1.yaml | 607 ++ .../crd/thanosruler_crd_0_53_1.yaml | 6204 ++++++++++++ .../prometheus/kube-prometheus-rules.yaml | 32 - .../kubernetes-monitoring-alertingrules.yaml | 962 -- .../kubernetes-monitoring-rules.yaml | 687 -- .../manifests/prometheus/kustomization.yaml | 14 +- .../prometheus/node-exporter-rules.yaml | 65 - ...prometheus-alertmanager-config-secret.yaml | 23 + .../prometheus-operator-deployment.yaml | 47 + .../prometheus/prometheus-operator-role.yaml | 90 + .../prometheus-operator-roleBinding.yaml | 13 + .../prometheus-operator-serviceAccount.yaml | 5 + .../prometheus/prometheus-resource.yaml | 73 + ... 
=> prometheus-scrape-targets-secret.yaml} | 39 +- .../prometheus/prometheus-statefulset.yaml | 165 - .../prometheus/prometheusrules/k8s.yaml | 147 + .../kube-apiserver-availability.yaml | 119 + .../kube-apiserver-histogram.yaml | 38 + .../prometheusrules/kube-apiserver.yaml | 342 + .../kube-prometheus-general.yaml | 15 + .../kube-prometheus-node-recording.yaml | 27 + .../prometheusrules/kube-scheduler.yaml | 56 + .../prometheus/prometheusrules/kubelet.yaml | 26 + .../kubernetes-monitoring-alertingrules.yaml | 945 ++ .../prometheusrules/kustomization.yaml | 12 + .../prometheusrules/node-exporter.yaml | 67 + .../prometheus/prometheusrules/node.yaml | 35 + .../endpointmetrics/pkg/rendering/renderer.go | 90 +- .../pkg/rendering/templates/templates.go | 10 + .../multiclusterobservability_shared.go | 5 +- .../multiclusterobservability_types.go | 2 +- .../multiclusterobservability_types.go | 43 +- .../multiclusterobservability_webhook.go | 40 +- .../api/v1beta2/zz_generated.deepcopy.go | 100 +- .../core.observatorium.io_observatoria.yaml | 75 + ...bility-operator.clusterserviceversion.yaml | 14 +- ...gement.io_multiclusterobservabilities.yaml | 55 +- .../core.observatorium.io_observatoria.yaml | 75 + ...gement.io_multiclusterobservabilities.yaml | 65 +- .../config/manager/manager.yaml | 2 +- .../config/rbac/mco_role.yaml | 12 + .../multiclusterobservability/grafana.go | 27 +- .../multiclusterobservability_controller.go | 210 +- ...lticlusterobservability_controller_test.go | 67 +- .../multiclusterobservability_status.go | 6 +- .../observatorium.go | 219 +- .../observatorium_test.go | 164 +- .../storageversionmigration.go | 30 +- .../storageversionmigration_test.go | 12 +- .../endpoint_metrics_operator.go | 57 +- .../placementrule/hub_info_secret.go | 3 +- .../placementrule/hub_info_secret_test.go | 4 +- .../controllers/placementrule/manifestwork.go | 96 +- .../placementrule/manifestwork_test.go | 59 +- .../placementrule/obsaddon_test.go | 4 +- .../placementrule/placementrule_controller.go | 167 +- .../placementrule_controller_test.go | 19 +- .../controllers/placementrule/role.go | 22 +- .../controllers/placementrule/role_test.go | 22 +- .../controllers/placementrule/status.go | 7 +- .../controllers/placementrule/status_test.go | 2 +- operators/multiclusterobservability/main.go | 25 +- .../alertmanager-statefulset.yaml | 2 +- .../base/alertmanager/kustomization.yaml | 1 + .../base/alertmanager/prometheusrule.yaml | 18 + .../base/config/metrics_allowlist.yaml | 232 +- .../dash-acm-clusters-overview-ocp311.yaml | 253 +- .../grafana/dash-acm-clusters-overview.yaml | 6 +- ...dash-acm-optimization-overview-ocp311.yaml | 1433 +++ .../dash-acm-optimization-overview.yaml | 187 +- .../base/grafana/dash-cluster-rsrc-use.yaml | 4 +- .../dash-k8s-capacity-planning-ocp311.yaml | 1088 +++ .../dash-k8s-compute-resources-cluster.yaml | 236 +- ...mpute-resources-namespace-pods-ocp311.yaml | 2407 +++++ ...-k8s-compute-resources-namespace-pods.yaml | 144 +- ...compute-resources-namespace-workloads.yaml | 51 +- .../dash-k8s-compute-resources-node-pods.yaml | 76 +- ...dash-k8s-compute-resources-pod-ocp311.yaml | 1260 +++ .../dash-k8s-compute-resources-pod.yaml | 58 +- .../dash-k8s-compute-resources-workload.yaml | 52 +- ...dash-k8s-namespaces-in-cluster-ocp311.yaml | 975 ++ .../grafana/dash-k8s-networking-cluster.yaml | 4 +- .../dash-k8s-pods-in-namespace-ocp311.yaml | 1475 +++ .../dash-k8s-summary-by-node-ocp311.yaml | 2216 +++++ .../base/grafana/dash-node-rsrc-use.yaml | 4 +- 
.../manifests/base/grafana/deployment.yaml | 4 +- .../manifests/base/grafana/kustomization.yaml | 9 +- .../manifests/base/proxy/deployment.yaml | 6 +- .../endpoint-observability/images.yaml | 13 +- .../endpoint-observability/operator.yaml | 2 +- .../endpoint-observability/role.yaml | 56 +- .../pkg/certificates/cert_agent.go | 5 +- .../pkg/certificates/cert_controller.go | 18 +- .../pkg/certificates/certificates.go | 16 +- .../pkg/config/config.go | 81 +- .../pkg/config/obj_storage_conf.go | 46 +- .../pkg/rendering/renderer_alertmanager.go | 7 +- .../pkg/rendering/renderer_grafana.go | 3 + .../pkg/rendering/renderer_proxy.go | 12 +- .../pkg/rendering/renderer_test.go | 2 +- .../pkg/servicemonitor/sm_controller.go | 16 +- .../pkg/util/backuputil.go | 80 + .../pkg/util/client.go | 19 +- .../pkg/util/managedclusteraddon.go | 52 +- .../pkg/util/remotewriteendpoint.go | 205 + .../pkg/util/remotewriteendpoint_test.go | 83 + .../pkg/webhook/webhook_controller.go | 40 +- operators/pkg/config/config.go | 29 +- operators/pkg/config/types.go | 53 + operators/pkg/deploying/deployer.go | 92 +- operators/pkg/deploying/deployer_test.go | 2 +- .../pkg/rendering/patching/patcher_test.go | 2 +- operators/pkg/rendering/renderer.go | 59 +- operators/pkg/util/obj_compare.go | 2 + proxy/pkg/util/util.go | 50 +- tests/Dockerfile | 2 +- tests/format-results.sh | 4 +- .../observability-e2e-test_suite_test.go | 47 +- tests/pkg/tests/observability_addon_test.go | 118 +- tests/pkg/tests/observability_alert_test.go | 78 +- .../pkg/tests/observability_certrenew_test.go | 39 +- tests/pkg/tests/observability_config_test.go | 64 +- .../pkg/tests/observability_dashboard_test.go | 22 +- .../observability_endpoint_preserve_test.go | 64 +- tests/pkg/tests/observability_grafana_test.go | 6 +- tests/pkg/tests/observability_install_test.go | 92 +- .../tests/observability_manifestwork_test.go | 27 +- tests/pkg/tests/observability_metrics_test.go | 50 +- ...servability_observatorium_preserve_test.go | 49 +- .../pkg/tests/observability_reconcile_test.go | 3 +- .../pkg/tests/observability_retention_test.go | 28 +- tests/pkg/tests/observability_route_test.go | 4 +- .../pkg/tests/observability_uninstall_test.go | 8 +- tests/pkg/tests/results.xml | 33 - tests/pkg/tests/results.xml.addon | 152 - tests/pkg/utils/mco_configmaps.go | 8 +- tests/pkg/utils/mco_dashboard.go | 2 +- tests/pkg/utils/mco_deploy.go | 22 +- tests/pkg/utils/mco_deployments.go | 15 +- tests/pkg/utils/mco_managedcluster.go | 8 +- tests/pkg/utils/mco_oba.go | 36 +- tests/pkg/utils/mco_pods.go | 15 +- tests/pkg/utils/mco_router_ca.go | 20 +- tests/pkg/utils/mco_sa.go | 17 + tests/pkg/utils/mco_statefulset.go | 6 +- tests/pkg/utils/utils.go | 150 +- .../req_crds/prometheusrule-crd.yaml | 100 + tools/generate-dashboard-configmap-yaml.sh | 2 +- tools/grafana-dashboards-for-ocp3.11.md | 58 + tools/grafana-dev-config.ini | 0 tools/grafana-dev-config.ini-e | 0 tools/grafana-dev-deploy.yaml | 0 tools/ocp311-dashboards-example.png | Bin 0 -> 187880 bytes tools/simulator/README.md | 9 + tools/simulator/alert-forward/Dockerfile | 24 + tools/simulator/alert-forward/README.md | 166 +- tools/simulator/alert-forward/alerts.json | 18 + .../alert-forward/clean-alert-forwarder.sh | 21 + tools/simulator/alert-forward/deployment.yaml | 31 + tools/simulator/alert-forward/main.go | 224 +- .../alert-forward/setup-alert-forwarder.sh | 70 + tools/simulator/managed-cluster/README.md | 81 +- .../managed-cluster/clean-managedcluster.sh | 22 + .../managed-cluster/setup-managedcluster.sh | 43 +- 
tools/simulator/metrics-collector/Makefile | 43 - tools/simulator/metrics-collector/README.md | 122 +- .../clean-metrics-collector.sh | 70 +- .../generate-metrics-data.sh | 144 + .../metrics-collector-view.yaml | 2 +- .../setup-metrics-collector.sh | 169 +- 230 files changed, 46677 insertions(+), 3994 deletions(-) delete mode 100644 Jenkinsfile_upgrade create mode 100644 collectors/metrics/pkg/collectrule/evaluator.go create mode 100644 collectors/metrics/pkg/collectrule/evaluator_test.go create mode 100644 examples/alerts/watchdog_rule/kustomization.yaml create mode 100644 examples/alerts/watchdog_rule/watchdog-rule.yaml create mode 100644 examples/dashboards/alert-dashboards/README.md create mode 100644 examples/dashboards/alert-dashboards/alert-analysis.yaml create mode 100644 examples/dashboards/alert-dashboards/alerts-by-cluster.yaml create mode 100644 examples/dashboards/alert-dashboards/clusters-by-alert.yaml create mode 100644 examples/dashboards/alert-dashboards/kustomization.yaml create mode 100644 examples/gen.go create mode 100644 examples/mco/e2e/v1beta2/custom-certs/kustomization.yaml create mode 100644 examples/mco/e2e/v1beta2/custom-certs/observability.yaml create mode 100644 examples/minio-tls/certs/ca.crt create mode 100644 examples/minio-tls/certs/private.key create mode 100644 examples/minio-tls/certs/public.crt create mode 100644 examples/minio-tls/kustomization.yaml create mode 100644 examples/minio-tls/minio-deployment.yaml create mode 100644 examples/minio-tls/minio-pvc.yaml create mode 100644 examples/minio-tls/minio-service.yaml create mode 100644 examples/minio-tls/minio-tls-secret.yaml create mode 100644 examples/minio-tls/thanos-object-storage.yaml create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator.go create mode 100644 operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator_test.go create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/alertmanager_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/alertmanagerconfig_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/podmonitor_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/probe_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/prometheusrule_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/servicemonitor_crd_0_53_1.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/crd/thanosruler_crd_0_53_1.yaml delete mode 100644 operators/endpointmetrics/manifests/prometheus/kube-prometheus-rules.yaml delete mode 100644 operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-alertingrules.yaml delete mode 100644 operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml delete mode 100644 operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-alertmanager-config-secret.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-operator-role.yaml create mode 100644 
operators/endpointmetrics/manifests/prometheus/prometheus-operator-roleBinding.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-operator-serviceAccount.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-resource.yaml rename operators/endpointmetrics/manifests/prometheus/{prometheus-config.yaml => prometheus-scrape-targets-secret.yaml} (97%) delete mode 100644 operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/k8s.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-availability.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-histogram.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-general.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-node-recording.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-scheduler.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kubelet.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kubernetes-monitoring-alertingrules.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/kustomization.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/node-exporter.yaml create mode 100644 operators/endpointmetrics/manifests/prometheus/prometheusrules/node.yaml create mode 100644 operators/multiclusterobservability/manifests/base/alertmanager/prometheusrule.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview-ocp311.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-capacity-planning-ocp311.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods-ocp311.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod-ocp311.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-namespaces-in-cluster-ocp311.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-pods-in-namespace-ocp311.yaml create mode 100644 operators/multiclusterobservability/manifests/base/grafana/dash-k8s-summary-by-node-ocp311.yaml create mode 100644 operators/multiclusterobservability/pkg/util/backuputil.go create mode 100644 operators/multiclusterobservability/pkg/util/remotewriteendpoint.go create mode 100644 operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go create mode 100644 operators/pkg/config/types.go delete mode 100644 tests/pkg/tests/results.xml delete mode 100644 tests/pkg/tests/results.xml.addon create mode 100644 tests/run-in-kind/req_crds/prometheusrule-crd.yaml create mode 100644 tools/grafana-dashboards-for-ocp3.11.md create mode 100644 tools/grafana-dev-config.ini create mode 100644 tools/grafana-dev-config.ini-e create mode 100644 tools/grafana-dev-deploy.yaml create mode 100644 tools/ocp311-dashboards-example.png create mode 100644 tools/simulator/README.md create mode 100644 tools/simulator/alert-forward/Dockerfile 
create mode 100644 tools/simulator/alert-forward/alerts.json create mode 100755 tools/simulator/alert-forward/clean-alert-forwarder.sh create mode 100644 tools/simulator/alert-forward/deployment.yaml create mode 100755 tools/simulator/alert-forward/setup-alert-forwarder.sh create mode 100755 tools/simulator/managed-cluster/clean-managedcluster.sh delete mode 100644 tools/simulator/metrics-collector/Makefile create mode 100755 tools/simulator/metrics-collector/generate-metrics-data.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0d73f533d..4031824bb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,7 +32,7 @@ This can be done with the `--signoff` option to `git commit`. See the [Git docum Anyone may comment on issues and submit reviews for pull requests. However, in order to be assigned an issue or pull request, you must be a member of the -[stolostron](https://github.com/stolostron) GitHub organization. +[open-cluster-management](https://github.com/stolostron) GitHub organization. Repo maintainers can assign you an issue or pull request by leaving a `/assign ` comment on the issue or pull request. diff --git a/Jenkinsfile_upgrade b/Jenkinsfile_upgrade deleted file mode 100644 index e4a9a1c71..000000000 --- a/Jenkinsfile_upgrade +++ /dev/null @@ -1,79 +0,0 @@ -pipeline { - agent { - docker { - image 'quay.io/rhn_support_abutt/ginkgo_1_14_2-linux-go' - args '--network host -u 0:0' - } - } - parameters { - string(name:'HUB_CLUSTER_NAME', defaultValue: '', description: 'Name of Hub cluster') - string(name:'BASE_DOMAIN', defaultValue: '', description: 'Base domain of Hub cluster') - string(name:'OC_CLUSTER_USER', defaultValue: 'kubeadmin', description: 'OCP Hub User Name') - string(name:'OC_HUB_CLUSTER_PASS', defaultValue: '', description: 'OCP Hub Password') - string(name:'OC_HUB_CLUSTER_API_URL', defaultValue: '', description: 'OCP Hub API URL') - string(name:'BUCKET', defaultValue: 'obs-v1', description: 'Bucket name') - string(name:'REGION', defaultValue: 'us-east-1', description: 'Bucket region') - password(name:'AWS_ACCESS_KEY_ID', defaultValue: '', description: 'AWS access key ID') - password(name:'AWS_SECRET_ACCESS_KEY', defaultValue: '', description: 'AWS secret access key') - string(name:'SKIP_INSTALL_STEP', defaultValue: 'false', description: 'Skip Observability installation') - string(name:'SKIP_UNINSTALL_STEP', defaultValue: 'true', description: 'Skip Observability uninstallation') - string(name:'USE_MINIO', defaultValue: 'false', description: 'If no AWS S3 bucket, you could use minio as object storage to instead') - } - environment { - CI = 'true' - AWS_SECRET_ACCESS_KEY = credentials('cqu_aws_secret_access_key') - AWS_ACCESS_KEY_ID = credentials('cqu_aws_access_key') - } - stages { - stage('Test Run') { - steps { - sh """ - export OC_CLUSTER_USER="${params.OC_CLUSTER_USER}" - export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" - export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - export HUB_CLUSTER_NAME="${params.HUB_CLUSTER_NAME}" - export BASE_DOMAIN="${params.BASE_DOMAIN}" - export BUCKET="${params.BUCKET}" - export REGION="${params.REGION}" - export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" - export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" - - if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then - export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" - fi - - if [[ -n "${params.AWS_SECRET_ACCESS_KEY}" ]]; then - export AWS_SECRET_ACCESS_KEY="${params.AWS_SECRET_ACCESS_KEY}" - fi - - if [[ "${!params.USE_MINIO}" == false ]]; 
then - export IS_CANARY_ENV=true - fi - - if [[ -z "${HUB_CLUSTER_NAME}" || -z "${BASE_DOMAIN}" || -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then - echo "Aborting test.. OCP HUB details are required for the test execution" - exit 1 - else - oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL - export KUBECONFIG=~/.kube/config - go mod vendor && ginkgo build ./tests/pkg/tests/ - cd tests - cp resources/options.yaml.template resources/options.yaml - /usr/local/bin/yq e -i '.options.hub.name="'"\$HUB_CLUSTER_NAME"'"' resources/options.yaml - /usr/local/bin/yq e -i '.options.hub.baseDomain="'"\$BASE_DOMAIN"'"' resources/options.yaml - cat resources/options.yaml - ginkgo --focus="BVT" -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 - fi - """ - } - } - - - } - post { - always { - archiveArtifacts artifacts: 'tests/pkg/tests/*.xml', followSymlinks: false - junit 'tests/pkg/tests/*.xml' - } - } -} diff --git a/Makefile b/Makefile index cc8eada04..c0b3cb3ea 100644 --- a/Makefile +++ b/Makefile @@ -56,3 +56,4 @@ endif .PHONY: bundle bundle: cd operators/multiclusterobservability && make bundle + diff --git a/README.md b/README.md index 4d36ce5f9..855d14c1c 100644 --- a/README.md +++ b/README.md @@ -1 +1,180 @@ -# observability_core_automation \ No newline at end of file +# Observability Overview + +[![Build](https://img.shields.io/badge/build-Prow-informational)](https://prow.ci.openshift.org/?repo=stolostron%2F${multicluster-observability-operator}) +[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=stolostron_multicluster-observability-operator&metric=alert_status&token=3452dcca82a98e4aa297c1b31fd21939288db4c0)](https://sonarcloud.io/dashboard?id=stolostron_multicluster-observability-operator) + + +This document attempts to explain how the different components in Open Cluster Management Observability come together to deliver multicluster fleet observability. We leverage several open source projects: [Grafana](https://github.com/grafana/grafana), [Alertmanager](https://github.com/prometheus/alertmanager), [Thanos](https://github.com/thanos-io/thanos/), [Observatorium Operator and API Gateway](https://github.com/observatorium), [Prometheus](https://github.com/prometheus/prometheus). We also leverage a few [Open Cluster Management projects](https://open-cluster-management.io/), namely [Cluster Manager or Registration Operator](https://github.com/stolostron/registration-operator) and [Klusterlet](https://github.com/stolostron/registration-operator). The multicluster-observability operator is the root operator which pulls in all things needed. + +## Conceptual Diagram + +![Conceptual Diagram of the Components](docs/images/observability_overview_in_ocm.png) + +## Associated GitHub Repositories + +Component | Git Repo | Description +--- | ------ | ---- +MCO Operator | [multicluster-observability-operator](https://github.com/stolostron/multicluster-observability-operator) | Operator for monitoring. This is the root repo. If we follow the Readme instructions here to install, the code from all other repos mentioned below is used/referenced. +Endpoint Operator | [endpoint-metrics-operator](https://github.com/stolostron/multicluster-observability-operator/tree/main/operators/endpointmetrics) | Operator that manages setting up observability and data collection at the managed clusters.
+Observatorium Operator | [observatorium-operator](https://github.com/stolostron/observatorium-operator) | Operator to deploy the Observatorium project. Inside Open Cluster Management, at this time, it means metrics using Thanos. Forked from main observatorium-operator repo. +Metrics collector | [metrics-collector](https://github.com/stolostron/multicluster-observability-operator/tree/main/collectors/metrics) | Scrapes metrics from Prometheus on managed clusters; the metric collection is shaped by configuring an allow-list. +RBAC Proxy | [rbac_query_proxy](https://github.com/stolostron/multicluster-observability-operator/tree/main/proxy) | Helper service that acts as a multicluster metrics RBAC proxy. +Grafana | [grafana](https://github.com/stolostron/grafana) | Grafana repo - for dashboarding and metric analytics. Forked from main grafana repo. +Dashboard Loader | [grafana-dashboard-loader](https://github.com/stolostron/multicluster-observability-operator/tree/main/loaders/dashboards) | Sidecar proxy to load grafana dashboards from configmaps. +Management Ingress | [management-ingress](https://github.com/stolostron/management-ingress) | NGINX based ingress controller to serve Open Cluster Management services. +Observatorium API | [observatorium](https://github.com/stolostron/observatorium) | API Gateway which controls reading and writing of the Observability data to the backend infrastructure. Forked from main observatorium API repo. +Thanos Ecosystem | [kube-thanos](https://github.com/stolostron/kube-thanos) | Kubernetes-specific configuration for deploying Thanos. The observatorium operator leverages this configuration to deploy the backend Thanos components. + +## Quick Start Guide + +### Prerequisites + +* Ensure [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) and [kustomize](https://kubernetes-sigs.github.io/kustomize/installation) are installed. +* Prepare an OpenShift cluster to function as the hub cluster. +* Ensure [docker 17.03+](https://docs.docker.com/get-started) is installed. +* Ensure [golang 1.15+](https://golang.org/doc/install) is installed. +* Ensure [operator-sdk 1.4.2+](https://github.com/operator-framework/operator-sdk) is installed. +* Ensure the open-cluster-management cluster manager is installed. See [Cluster Manager](https://open-cluster-management.io/getting-started/core/cluster-manager/) for more information. +* Ensure the `open-cluster-management` _klusterlet_ is installed. See [Klusterlet](https://open-cluster-management.io/getting-started/core/register-cluster/) for more information. + +> Note: By default, the API conversion webhook uses the OpenShift service serving certificate feature to manage its certificate; you can replace it with cert-manager if you want to run the multicluster-observability-operator in a Kubernetes cluster. + +Use the following quick start commands for building and testing the multicluster-observability-operator: + +### Clone the Repository + +Check out the multicluster-observability-operator repository. + +``` +git clone git@github.com:stolostron/multicluster-observability-operator.git +cd multicluster-observability-operator +``` + +### Build the Operator + +Build the multicluster-observability-operator image and push it to a public registry, such as quay.io: + +``` +make docker-build docker-push IMG=quay.io//multicluster-observability-operator:latest +``` + +### Run the Operator in the Cluster + +1.
Create the `open-cluster-management-observability` namespace if it doesn't exist: +``` +kubectl create ns open-cluster-management-observability +``` + +2. Deploy the minio service, which acts as the storage service for the multicluster observability: +``` +kubectl -n open-cluster-management-observability apply -k examples/minio +``` + +3. Replace the operator image and deploy the multicluster-observability-operator: +``` +make deploy IMG=quay.io//multicluster-observability-operator:latest +``` + +4. Deploy the multicluster-observability-operator CR: +``` +kubectl apply -f operators/multiclusterobservability/config/samples/observability_v1beta2_multiclusterobservability.yaml +``` + +5. Verify all the components for the Multicluster Observability are starting up and running: +``` +kubectl -n open-cluster-management-observability get pod +NAME READY STATUS RESTARTS AGE +minio-79c7ff488d-72h65 1/1 Running 0 9m38s +observability-alertmanager-0 3/3 Running 0 7m17s +observability-alertmanager-1 3/3 Running 0 6m36s +observability-alertmanager-2 3/3 Running 0 6m18s +observability-grafana-85fdc8c48d-j67j6 2/2 Running 0 7m17s +observability-grafana-85fdc8c48d-wnltt 2/2 Running 0 7m17s +observability-observatorium-api-69cfff4c95-bpw5s 1/1 Running 0 7m2s +observability-observatorium-api-69cfff4c95-gbh7b 1/1 Running 0 7m2s +observability-observatorium-operator-5df6b7949c-kbpmp 1/1 Running 0 7m17s +observability-rbac-query-proxy-d44df47c4-9ccdn 2/2 Running 0 7m15s +observability-rbac-query-proxy-d44df47c4-rtcgh 2/2 Running 0 6m50s +observability-thanos-compact-0 1/1 Running 0 7m2s +observability-thanos-query-79c4d9488b-bd5sf 1/1 Running 0 7m3s +observability-thanos-query-79c4d9488b-d7wzt 1/1 Running 0 7m3s +observability-thanos-query-frontend-6fdb5d4946-rgblb 1/1 Running 0 7m3s +observability-thanos-query-frontend-6fdb5d4946-shsz2 1/1 Running 0 7m3s +observability-thanos-query-frontend-memcached-0 2/2 Running 0 7m3s +observability-thanos-query-frontend-memcached-1 2/2 Running 0 6m37s +observability-thanos-query-frontend-memcached-2 2/2 Running 0 6m33s +observability-thanos-receive-controller-6b446c5576-hj6xl 1/1 Running 0 7m3s +observability-thanos-receive-default-0 1/1 Running 0 7m2s +observability-thanos-receive-default-1 1/1 Running 0 6m20s +observability-thanos-receive-default-2 1/1 Running 0 5m50s +observability-thanos-rule-0 2/2 Running 0 7m3s +observability-thanos-rule-1 2/2 Running 0 6m27s +observability-thanos-rule-2 2/2 Running 0 5m56s +observability-thanos-store-memcached-0 2/2 Running 0 7m3s +observability-thanos-store-memcached-1 2/2 Running 0 6m37s +observability-thanos-store-memcached-2 2/2 Running 0 6m33s +observability-thanos-store-shard-0-0 1/1 Running 2 7m3s +observability-thanos-store-shard-1-0 1/1 Running 2 7m3s +observability-thanos-store-shard-2-0 1/1 Running 2 7m3s +``` + +### What is next + +After a successful deployment, you can run the following command to check if you have an OCP cluster as a managed cluster.
+ +``` +kubectl get managedcluster --show-labels +``` +If the `vendor=OpenShift` label does not exist on your managed cluster, you can add it manually with the command `kubectl label managedcluster vendor=OpenShift`. + +Then you should see the `metrics-collector` pod running: +``` +kubectl -n open-cluster-management-addon-observability get pod +endpoint-observability-operator-5c95cb9df9-4cphg 1/1 Running 0 97m +metrics-collector-deployment-6c7c8f9447-brpjj 1/1 Running 0 96m +``` + +Expose the thanos query frontend via route by running this command: +``` +cat << EOF | kubectl -n open-cluster-management-observability apply -f - +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: query-frontend +spec: + port: + targetPort: http + wildcardPolicy: None + to: + kind: Service + name: observability-thanos-query-frontend +EOF +``` +You can access the thanos query UI in a browser by entering the host from `oc get route -n open-cluster-management-observability query-frontend`. Metrics should be available when you search for the metric `:node_memory_MemAvailable_bytes:sum`. The available metrics are listed [here](https://github.com/stolostron/multicluster-observability-operator/blob/main/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml). + +### Uninstall the Operator in the Cluster + +1. Delete the multicluster-observability-operator CR: + +``` +kubectl -n open-cluster-management-observability delete -f operators/multiclusterobservability/config/samples/observability_v1beta2_multiclusterobservability.yaml +``` + +2. Delete the multicluster-observability-operator: + +``` +make undeploy +``` + +3. Delete the minio service: + +``` +kubectl -n open-cluster-management-observability delete -k examples/minio +``` + +4.
Delete the `open-cluster-management-observability` namespace: + +``` +kubectl delete ns open-cluster-management-observability +``` + diff --git a/REMEDIATE.md b/REMEDIATE.md index 80ac8f84f..9d16f4a5f 100644 --- a/REMEDIATE.md +++ b/REMEDIATE.md @@ -3,3 +3,4 @@ ### Tue Sep 7 10:46:16 CST 2021 https://github.com/stolostron/backlog/issues/15853 + diff --git a/cicd-scripts/customize-mco.sh b/cicd-scripts/customize-mco.sh index 427b4e4de..f0cd905f5 100755 --- a/cicd-scripts/customize-mco.sh +++ b/cicd-scripts/customize-mco.sh @@ -24,7 +24,7 @@ if [[ "${PULL_BASE_REF}" == "release-"* ]]; then LATEST_SNAPSHOT=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'-SNAPSHOT")))|keys[length-1]') fi if [[ "${LATEST_SNAPSHOT}" == "null" ]] || [[ "${LATEST_SNAPSHOT}" == "" ]]; then - LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("SNAPSHOT")))|keys[length-1]') + LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select((.key|contains("SNAPSHOT"))and(.key|contains("9.9.0")|not)))|keys[length-1]') fi # trim the leading and tailing quotes diff --git a/cicd-scripts/run-e2e-tests.sh b/cicd-scripts/run-e2e-tests.sh index b2e1da06d..4cd445aa3 100755 --- a/cicd-scripts/run-e2e-tests.sh +++ b/cicd-scripts/run-e2e-tests.sh @@ -19,6 +19,7 @@ GINKGO_FOCUS="$(cat /tmp/ginkgo_focus)" # need to modify sc for KinD if [[ -n "${IS_KIND_ENV}" ]]; then ${SED_COMMAND} "s~gp2$~standard~g" ${ROOTDIR}/examples/minio/minio-pvc.yaml + ${SED_COMMAND} "s~gp2$~standard~g" ${ROOTDIR}/examples/minio-tls/minio-pvc.yaml fi kubeconfig_hub_path="" @@ -79,6 +80,8 @@ else go install github.com/onsi/ginkgo/ginkgo@latest GINKGO_CMD="$(go env GOPATH)/bin/ginkgo" fi + +go mod vendor ${GINKGO_CMD} -debug -trace ${GINKGO_FOCUS} -v ${ROOTDIR}/tests/pkg/tests -- -options=${OPTIONSFILE} -v=3 cat ${ROOTDIR}/tests/pkg/tests/results.xml | grep failures=\"0\" | grep errors=\"0\" diff --git a/cicd-scripts/setup-e2e-tests.sh b/cicd-scripts/setup-e2e-tests.sh index 8fa2f5ebc..7079cd1fe 100755 --- a/cicd-scripts/setup-e2e-tests.sh +++ b/cicd-scripts/setup-e2e-tests.sh @@ -21,7 +21,7 @@ OCM_DEFAULT_NS="open-cluster-management" AGENT_NS="open-cluster-management-agent" HUB_NS="open-cluster-management-hub" OBSERVABILITY_NS="open-cluster-management-observability" -IMAGE_REPO="quay.io/open-cluster-management" +IMAGE_REPO="quay.io/stolostron" export MANAGED_CLUSTER="local-cluster" # registration-operator needs this SED_COMMAND='sed -i-e -e' @@ -44,12 +44,10 @@ BRANCH="" LATEST_SNAPSHOT="" if [[ "${PULL_BASE_REF}" == "release-"* ]]; then BRANCH=${PULL_BASE_REF#"release-"} - BRANCH=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'")))|keys[length-1]' | awk -F '-' '{print $1}') - BRANCH="${BRANCH#\"}" - LATEST_SNAPSHOT=$(curl https://quay.io//api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("'${BRANCH}'-SNAPSHOT")))|keys[length-1]') + LATEST_SNAPSHOT=`curl https://quay.io//api/v1/repository/open-cluster-management/multicluster-observability-operator | jq '.tags|with_entries(select(.key|test("'${BRANCH}'.*-SNAPSHOT-*")))|keys[length-1]'` fi if [[ "${LATEST_SNAPSHOT}" == "null" ]] || [[ "${LATEST_SNAPSHOT}" == "" ]]; then - 
LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select(.key|contains("SNAPSHOT")))|keys[length-1]') + LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/multicluster-observability-operator | jq '.tags|with_entries(select((.key|contains("SNAPSHOT"))and(.key|contains("9.9.0")|not)))|keys[length-1]') fi # trim the leading and tailing quotes @@ -88,8 +86,9 @@ deploy_hub_spoke_core() { git clone --depth 1 -b release-2.4 https://github.com/stolostron/registration-operator.git && cd registration-operator ${SED_COMMAND} "s~clusterName: cluster1$~clusterName: ${MANAGED_CLUSTER}~g" deploy/klusterlet/config/samples/operator_open-cluster-management_klusterlets.cr.yaml # deploy hub and spoke via OLM - make cluster-ip - make deploy + REGISTRATION_LATEST_SNAPSHOT=$(curl https://quay.io/api/v1/repository/stolostron/registration | jq '.tags|with_entries(select(.key|test("'2.4'.*-SNAPSHOT-*")))|keys[length-1]') + make cluster-ip IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT} + make deploy IMAGE_REGISTRY=quay.io/stolostron IMAGE_TAG=${REGISTRATION_LATEST_SNAPSHOT} WORK_TAG=${REGISTRATION_LATEST_SNAPSHOT} REGISTRATION_TAG=${REGISTRATION_LATEST_SNAPSHOT} PLACEMENT_TAG=${REGISTRATION_LATEST_SNAPSHOT} # wait until hub and spoke are ready wait_for_deployment_ready 10 60s ${HUB_NS} cluster-manager-registration-controller cluster-manager-registration-webhook cluster-manager-work-webhook @@ -146,7 +145,8 @@ approve_csr_joinrequest() { deploy_grafana_test() { cd ${ROOTDIR} ${SED_COMMAND} "s~name: grafana$~name: grafana-test~g; s~app: multicluster-observability-grafana$~app: multicluster-observability-grafana-test~g; s~secretName: grafana-config$~secretName: grafana-config-test~g; s~secretName: grafana-datasources$~secretName: grafana-datasources-test~g; /MULTICLUSTEROBSERVABILITY_CR_NAME/d" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml - ${SED_COMMAND} "s~image: quay.io/stolostron/grafana-dashboard-loader.*$~image: ${IMAGE_REPO}/grafana-dashboard-loader:${LATEST_SNAPSHOT}~g" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml + ${SED_COMMAND} "s~image: quay.io/stolostron/grafana-dashboard-loader:.*$~image: ${IMAGE_REPO}/grafana-dashboard-loader:${LATEST_SNAPSHOT}~g" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml + ${SED_COMMAND} "s~image: quay.io/stolostron/grafana:.*$~image: ${IMAGE_REPO}/grafana:${LATEST_SNAPSHOT}~g" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml ${SED_COMMAND} "s~replicas: 2$~replicas: 1~g" operators/multiclusterobservability/manifests/base/grafana/deployment.yaml kubectl apply -f operators/multiclusterobservability/manifests/base/grafana/deployment.yaml kubectl apply -f ${ROOTDIR}/tests/run-in-kind/grafana # create grafana-test svc, grafana-test config and datasource configmaps @@ -273,3 +273,4 @@ execute() { # start executing the ACTION execute + diff --git a/collectors/metrics/cmd/metrics-collector/main.go b/collectors/metrics/cmd/metrics-collector/main.go index 458c2dd1e..fdd71d095 100644 --- a/collectors/metrics/cmd/metrics-collector/main.go +++ b/collectors/metrics/cmd/metrics-collector/main.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/uuid" + 
"github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/collectrule" "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/forwarder" collectorhttp "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/http" "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" @@ -30,11 +31,13 @@ import ( func main() { opt := &Options{ - Listen: "localhost:9002", - LimitBytes: 200 * 1024, - Rules: []string{`{__name__="up"}`}, - Interval: 4*time.Minute + 30*time.Second, - WorkerNum: 1, + From: "http://localhost:9090", + Listen: "localhost:9002", + LimitBytes: 200 * 1024, + Rules: []string{`{__name__="up"}`}, + Interval: 4*time.Minute + 30*time.Second, + EvaluateInterval: 30 * time.Second, + WorkerNum: 1, } cmd := &cobra.Command{ Short: "Federate Prometheus via push", @@ -45,38 +48,164 @@ func main() { }, } - cmd.Flags().Int64Var(&opt.WorkerNum, "worker-number", opt.WorkerNum, "The number of client runs in the simulate environment.") - cmd.Flags().StringVar(&opt.Listen, "listen", opt.Listen, "A host:port to listen on for health and metrics.") - cmd.Flags().StringVar(&opt.From, "from", opt.From, "The Prometheus server to federate from.") - cmd.Flags().StringVar(&opt.FromToken, "from-token", opt.FromToken, "A bearer token to use when authenticating to the source Prometheus server.") - cmd.Flags().StringVar(&opt.FromCAFile, "from-ca-file", opt.FromCAFile, "A file containing the CA certificate to use to verify the --from URL in addition to the system roots certificates.") - cmd.Flags().StringVar(&opt.FromTokenFile, "from-token-file", opt.FromTokenFile, "A file containing a bearer token to use when authenticating to the source Prometheus server.") - cmd.Flags().StringVar(&opt.ToUpload, "to-upload", opt.ToUpload, "A server endpoint to push metrics to.") - cmd.Flags().DurationVar(&opt.Interval, "interval", opt.Interval, "The interval between scrapes. 
Prometheus returns the last 5 minutes of metrics when invoking the federation endpoint.") - cmd.Flags().Int64Var(&opt.LimitBytes, "limit-bytes", opt.LimitBytes, "The maxiumum acceptable size of a response returned when scraping Prometheus.") + cmd.Flags().Int64Var( + &opt.WorkerNum, + "worker-number", + opt.WorkerNum, + "The number of clients to run in the simulated environment.") + cmd.Flags().StringVar( + &opt.Listen, + "listen", + opt.Listen, + "A host:port to listen on for health and metrics.") + cmd.Flags().StringVar( + &opt.From, + "from", + opt.From, + "The Prometheus server to federate from.") + cmd.Flags().StringVar( + &opt.FromToken, + "from-token", + opt.FromToken, + "A bearer token to use when authenticating to the source Prometheus server.") + cmd.Flags().StringVar( + &opt.FromCAFile, + "from-ca-file", + opt.FromCAFile, + `A file containing the CA certificate to use to verify the --from URL in + addition to the system root certificates.`) + cmd.Flags().StringVar( + &opt.FromTokenFile, + "from-token-file", + opt.FromTokenFile, + "A file containing a bearer token to use when authenticating to the source Prometheus server.") + cmd.Flags().StringVar( + &opt.ToUpload, + "to-upload", + opt.ToUpload, + "A server endpoint to push metrics to.") + cmd.Flags().StringVar( + &opt.ToUploadCA, + "to-upload-ca", + opt.ToUploadCA, + "A file containing the CA certificate to verify the --to-upload URL in addition to the system certificates.") + cmd.Flags().StringVar( + &opt.ToUploadCert, + "to-upload-cert", + opt.ToUploadCert, + "A file containing the certificate to use to secure the request to the --to-upload URL.") + cmd.Flags().StringVar( + &opt.ToUploadKey, + "to-upload-key", + opt.ToUploadKey, + "A file containing the certificate key to use to secure the request to the --to-upload URL.") + cmd.Flags().DurationVar( + &opt.Interval, + "interval", + opt.Interval, + `The interval between scrapes. Prometheus returns the last 5 minutes of + metrics when invoking the federation endpoint.`) + cmd.Flags().DurationVar( + &opt.EvaluateInterval, + "evaluate-interval", + opt.EvaluateInterval, + "The interval between collect rule evaluation.") + cmd.Flags().Int64Var( + &opt.LimitBytes, + "limit-bytes", + opt.LimitBytes, + "The maximum acceptable size of a response returned when scraping Prometheus.") // TODO: more complex input definition, such as a JSON struct - cmd.Flags().StringArrayVar(&opt.Rules, "match", opt.Rules, "Match rules to federate.") - cmd.Flags().StringArrayVar(&opt.RecordingRules, "recordingrule", opt.RecordingRules, "Define recording rule is to generate new metrics based on specified query expression.") - cmd.Flags().StringVar(&opt.RulesFile, "match-file", opt.RulesFile, "A file containing match rules to federate, one rule per line.") - - cmd.Flags().StringSliceVar(&opt.LabelFlag, "label", opt.LabelFlag, "Labels to add to each outgoing metric, in key=value form.") - cmd.Flags().StringSliceVar(&opt.RenameFlag, "rename", opt.RenameFlag, "Rename metrics before sending by specifying OLD=NEW name pairs.") - cmd.Flags().StringArrayVar(&opt.ElideLabels, "elide-label", opt.ElideLabels, "A list of labels to be elided from outgoing metrics.
Default to elide label prometheus and prometheus_replica") - - cmd.Flags().StringSliceVar(&opt.AnonymizeLabels, "anonymize-labels", opt.AnonymizeLabels, "Anonymize the values of the provided values before sending them on.") - cmd.Flags().StringVar(&opt.AnonymizeSalt, "anonymize-salt", opt.AnonymizeSalt, "A secret and unguessable value used to anonymize the input data.") - cmd.Flags().StringVar(&opt.AnonymizeSaltFile, "anonymize-salt-file", opt.AnonymizeSaltFile, "A file containing a secret and unguessable value used to anonymize the input data.") - - cmd.Flags().BoolVarP(&opt.Verbose, "verbose", "v", opt.Verbose, "Show verbose output.") - - cmd.Flags().StringVar(&opt.LogLevel, "log-level", opt.LogLevel, "Log filtering level. e.g info, debug, warn, error") + cmd.Flags().StringArrayVar( + &opt.Rules, + "match", + opt.Rules, + "Match rules to federate.") + cmd.Flags().StringVar( + &opt.RulesFile, + "match-file", + opt.RulesFile, + "A file containing match rules to federate, one rule per line.") + cmd.Flags().StringArrayVar( + &opt.RecordingRules, + "recordingrule", + opt.RecordingRules, + "Define a recording rule to generate new metrics based on the specified query expression.") + cmd.Flags().StringVar( + &opt.RecordingRulesFile, + "recording-file", + opt.RecordingRulesFile, + "A file containing recording rules.") + cmd.Flags().StringArrayVar( + &opt.CollectRules, + "collectrule", + opt.CollectRules, + "Define a collect rule to collect additional metrics based on the specified event.") + cmd.Flags().StringVar( + &opt.CollectRulesFile, + "collect-file", + opt.CollectRulesFile, + "A file containing collect rules.") + + cmd.Flags().StringSliceVar( + &opt.LabelFlag, + "label", + opt.LabelFlag, + "Labels to add to each outgoing metric, in key=value form.") + cmd.Flags().StringSliceVar( + &opt.RenameFlag, + "rename", + opt.RenameFlag, + "Rename metrics before sending by specifying OLD=NEW name pairs.") + cmd.Flags().StringArrayVar( + &opt.ElideLabels, + "elide-label", + opt.ElideLabels, + `A list of labels to be elided from outgoing metrics. Defaults to eliding + the labels prometheus and prometheus_replica`) + + cmd.Flags().StringSliceVar( + &opt.AnonymizeLabels, + "anonymize-labels", + opt.AnonymizeLabels, + "Anonymize the values of the provided labels before sending them on.") + cmd.Flags().StringVar( + &opt.AnonymizeSalt, + "anonymize-salt", + opt.AnonymizeSalt, + "A secret and unguessable value used to anonymize the input data.") + cmd.Flags().StringVar( + &opt.AnonymizeSaltFile, + "anonymize-salt-file", + opt.AnonymizeSaltFile, + "A file containing a secret and unguessable value used to anonymize the input data.") + + cmd.Flags().BoolVarP( + &opt.Verbose, + "verbose", "v", + opt.Verbose, + "Show verbose output.") + + cmd.Flags().StringVar( + &opt.LogLevel, + "log-level", + opt.LogLevel, + "Log filtering level.
e.g info, debug, warn, error") // deprecated opt - cmd.Flags().StringVar(&opt.Identifier, "id", opt.Identifier, "The unique identifier for metrics sent with this client.") + cmd.Flags().StringVar( + &opt.Identifier, + "id", + opt.Identifier, + "The unique identifier for metrics sent with this client.") //simulation test - cmd.Flags().StringVar(&opt.SimulatedTimeseriesFile, "simulated-timeseries-file", opt.SimulatedTimeseriesFile, "A file containing the sample of timeseries.") + cmd.Flags().StringVar( + &opt.SimulatedTimeseriesFile, + "simulated-timeseries-file", + opt.SimulatedTimeseriesFile, + "A file containing the sample of timeseries.") l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) lvl, err := cmd.Flags().GetString("log-level") @@ -106,6 +235,9 @@ type Options struct { FromCAFile string FromToken string FromTokenFile string + ToUploadCA string + ToUploadCert string + ToUploadKey string RenameFlag []string Renames map[string]string @@ -116,14 +248,18 @@ type Options struct { AnonymizeSalt string AnonymizeSaltFile string - Rules []string - RecordingRules []string - RulesFile string + Rules []string + RulesFile string + RecordingRules []string + RecordingRulesFile string + CollectRules []string + CollectRulesFile string LabelFlag []string Labels map[string]string - Interval time.Duration + Interval time.Duration + EvaluateInterval time.Duration LogLevel string Logger log.Logger @@ -153,7 +289,12 @@ func (o *Options) Run() error { return fmt.Errorf("failed to configure metrics collector: %v", err) } - logger.Log(o.Logger, logger.Info, "msg", "starting metrics collector", "from", o.From, "to", o.ToUpload, "listen", o.Listen) + logger.Log( + o.Logger, logger.Info, + "msg", "starting metrics collector", + "from", o.From, + "to", o.ToUpload, + "listen", o.Listen) { // Execute the worker's `Run` func. @@ -224,6 +365,20 @@ func (o *Options) Run() error { return err } + if len(o.CollectRules) != 0 { + evaluator, err := collectrule.New(*cfg) + if err != nil { + return fmt.Errorf("failed to configure collect rule evaluator: %v", err) + } + ctx, cancel := context.WithCancel(context.Background()) + g.Add(func() error { + evaluator.Run(ctx) + return nil + }, func(error) { + cancel() + }) + } + return g.Run() } @@ -234,6 +389,9 @@ func runMultiWorkers(o *Options) error { ToUpload: o.ToUpload, FromCAFile: o.FromCAFile, FromTokenFile: o.FromTokenFile, + ToUploadCA: o.ToUploadCA, + ToUploadCert: o.ToUploadCert, + ToUploadKey: o.ToUploadKey, Rules: o.Rules, RenameFlag: o.RenameFlag, RecordingRules: o.RecordingRules, @@ -360,16 +518,21 @@ func initConfig(o *Options) (error, *forwarder.Config) { FromToken: o.FromToken, FromTokenFile: o.FromTokenFile, FromCAFile: o.FromCAFile, + ToUploadCA: o.ToUploadCA, + ToUploadCert: o.ToUploadCert, + ToUploadKey: o.ToUploadKey, AnonymizeLabels: o.AnonymizeLabels, AnonymizeSalt: o.AnonymizeSalt, AnonymizeSaltFile: o.AnonymizeSaltFile, Debug: o.Verbose, Interval: o.Interval, + EvaluateInterval: o.EvaluateInterval, LimitBytes: o.LimitBytes, Rules: o.Rules, - RecordingRules: o.RecordingRules, RulesFile: o.RulesFile, + RecordingRules: o.RecordingRules, + CollectRules: o.CollectRules, Transformer: transformer, Logger: o.Logger, diff --git a/collectors/metrics/pkg/collectrule/evaluator.go b/collectors/metrics/pkg/collectrule/evaluator.go new file mode 100644 index 000000000..1f5aa42f3 --- /dev/null +++ b/collectors/metrics/pkg/collectrule/evaluator.go @@ -0,0 +1,362 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package collectrule + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" + "sync" + "time" + + "github.com/go-kit/kit/log" + clientmodel "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/pkg/labels" + + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/forwarder" + rlogger "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/logger" + "github.com/stolostron/multicluster-observability-operator/collectors/metrics/pkg/metricsclient" +) + +const ( + expireDuration = 15 * time.Minute +) + +var ( + config forwarder.Config + forwardWorker *forwarder.Worker + cancel context.CancelFunc + rules = []CollectRule{} + pendingRules = map[string]*EvaluatedRule{} + firingRules = map[string]*EvaluatedRule{} + enabledMatches = map[uint64][]string{} +) + +type EvaluatedRule struct { + triggerTime map[uint64]*time.Time + resolveTime map[uint64]*time.Time +} +type CollectRule struct { + Name string `json:"name"` + Expr string `json:"expr"` + DurationStr string `json:"for"` + Names []string `json:"names"` + Matches []string `json:"matches"` + Duration time.Duration +} + +type Evaluator struct { + fromClient *metricsclient.Client + from *url.URL + + interval time.Duration + collectRules []string + + lock sync.Mutex + reconfigure chan struct{} + + logger log.Logger +} + +func New(cfg forwarder.Config) (*Evaluator, error) { + config = forwarder.Config{ + From: cfg.From, + FromToken: cfg.FromToken, + FromTokenFile: cfg.FromTokenFile, + FromCAFile: cfg.FromCAFile, + + ToUpload: cfg.ToUpload, + ToUploadCA: cfg.ToUploadCA, + ToUploadCert: cfg.ToUploadCert, + ToUploadKey: cfg.ToUploadKey, + + AnonymizeLabels: cfg.AnonymizeLabels, + AnonymizeSalt: cfg.AnonymizeSalt, + AnonymizeSaltFile: cfg.AnonymizeSaltFile, + Debug: cfg.Debug, + Interval: cfg.EvaluateInterval, + LimitBytes: cfg.LimitBytes, + Transformer: cfg.Transformer, + + Logger: cfg.Logger, + } + from := &url.URL{ + Scheme: cfg.From.Scheme, + Host: cfg.From.Host, + Path: "/api/v1/query", + } + evaluator := Evaluator{ + from: from, + interval: cfg.EvaluateInterval, + collectRules: cfg.CollectRules, + reconfigure: make(chan struct{}), + logger: log.With(cfg.Logger, "component", "collectrule/evaluator"), + } + + if err := unmarshalCollectorRules(evaluator); err != nil { + return nil, err + } + + if evaluator.interval == 0 { + evaluator.interval = 30 * time.Second + } + + fromClient, err := forwarder.CreateFromClient(cfg, evaluator.interval, "evaluate_query", cfg.Logger) + if err != nil { + return nil, err + } + evaluator.fromClient = fromClient + + return &evaluator, nil +} + +func (e *Evaluator) Reconfigure(cfg forwarder.Config) error { + evaluator, err := New(cfg) + if err != nil { + return fmt.Errorf("failed to reconfigure: %v", err) + } + + e.lock.Lock() + defer e.lock.Unlock() + + e.fromClient = evaluator.fromClient + e.interval = evaluator.interval + e.from = evaluator.from + e.collectRules = evaluator.collectRules + if err = unmarshalCollectorRules(*e); err != nil { + return err + } + + // Signal a restart to Run func. + // Do this in a goroutine since we do not care if restarting the Run loop is asynchronous. + go func() { e.reconfigure <- struct{}{} }() + return nil +} + +func (e *Evaluator) Run(ctx context.Context) { + for { + // Ensure that the Worker does not access critical configuration during a reconfiguration. 
+ e.lock.Lock() + wait := e.interval + // The critical section ends here. + e.lock.Unlock() + + e.evaluate(ctx) + + select { + // If the context is cancelled, then we're done. + case <-ctx.Done(): + return + case <-time.After(wait): + // We want to be able to interrupt a sleep to immediately apply a new configuration. + case <-e.reconfigure: + } + } +} + +func unmarshalCollectorRules(e Evaluator) error { + rules = []CollectRule{} + for _, ruleStr := range e.collectRules { + rule := &CollectRule{} + err := json.Unmarshal(([]byte)(ruleStr), rule) + if err != nil { + rlogger.Log(e.logger, rlogger.Error, "msg", "Input error", "err", err, "rule", rule) + return err + } + if rule.DurationStr != "" { + rule.Duration, err = time.ParseDuration(rule.DurationStr) + if err != nil { + rlogger.Log(e.logger, rlogger.Error, "msg", "wrong duration string found in collect rule", "for", rule.DurationStr) + } + } + rules = append(rules, *rule) + if pendingRules[rule.Name] == nil { + pendingRules[rule.Name] = &EvaluatedRule{ + triggerTime: map[uint64]*time.Time{}, + } + } + if firingRules[rule.Name] == nil { + firingRules[rule.Name] = &EvaluatedRule{ + triggerTime: map[uint64]*time.Time{}, + resolveTime: map[uint64]*time.Time{}, + } + } + } + return nil +} + +func getMatches() []string { + matches := []string{} + for _, v := range enabledMatches { + matches = append(matches, v[:]...) + } + return matches +} + +func startWorker() error { + if forwardWorker == nil { + var err error + forwardWorker, err = forwarder.New(config) + if err != nil { + return fmt.Errorf("failed to configure forwarder for additional metrics: %v", err) + } + var ctx context.Context + ctx, cancel = context.WithCancel(context.Background()) + go func() { + forwardWorker.Run(ctx) + cancel() + }() + } else { + err := forwardWorker.Reconfigure(config) + if err != nil { + return fmt.Errorf("failed to reconfigure forwarder for additional metrics: %v", err) + } + } + + return nil +} + +func renderMatches(r CollectRule, ls labels.Labels) []string { + matches := []string{} + for _, name := range r.Names { + matches = append(matches, fmt.Sprintf(`{__name__="%s"}`, name)) + } + labelsMap := map[string]string{} + for _, match := range r.Matches { + r := regexp.MustCompile(`\{\{ \$labels\.(.*) \}\}`) + m := r.FindAllStringSubmatch(match, -1) + for _, v := range m { + if _, ok := labelsMap[v[1]]; !ok { + for _, l := range ls { + if l.Name == v[1] { + labelsMap[l.Name] = l.Value + break + } + } + } + original := fmt.Sprintf("{{ $labels.%s }}", v[1]) + replace := labelsMap[v[1]] + matches = append(matches, fmt.Sprintf("{%s}", strings.ReplaceAll(match, original, replace))) + } + } + return matches +} + +func evaluateRule(logger log.Logger, r CollectRule, metrics []*clientmodel.MetricFamily) bool { + isUpdate := false + now := time.Now() + pendings := map[uint64]string{} + firings := map[uint64]string{} + for k, _ := range (*pendingRules[r.Name]).triggerTime { + pendings[k] = "" + } + for k, _ := range (*firingRules[r.Name]).triggerTime { + firings[k] = "" + } + for _, metric := range metrics { + for _, m := range metric.Metric { + ls := labels.Labels{} + for _, l := range m.Label { + ls = append(ls, labels.Label{ + Name: *l.Name, + Value: *l.Value, + }) + } + ls = append(ls, labels.Label{ + Name: "rule_name", + Value: r.Name, + }) + h := ls.Hash() + if (*firingRules[r.Name]).triggerTime[h] != nil { + delete(firings, h) + if (*firingRules[r.Name]).resolveTime[h] != nil { + // resolved rule triggered again + delete((*firingRules[r.Name]).resolveTime, h) + } 
+ continue + } + if (*pendingRules[r.Name]).triggerTime[h] == nil { + if r.Duration == 0 { + // no duration defined, fire immediately + (*firingRules[r.Name]).triggerTime[h] = &now + enabledMatches[h] = renderMatches(r, ls) + isUpdate = true + rlogger.Log(logger, rlogger.Info, "msg", "collect rule fired", "name", r.Name, "labels", ls) + } else { + (*pendingRules[r.Name]).triggerTime[h] = &now + } + continue + } + + delete(pendings, h) + if time.Since(*(*pendingRules[r.Name]).triggerTime[h]) >= r.Duration { + // already passed duration, fire + (*firingRules[r.Name]).triggerTime[h] = &now + delete((*pendingRules[r.Name]).triggerTime, h) + enabledMatches[h] = renderMatches(r, ls) + isUpdate = true + rlogger.Log(logger, rlogger.Info, "msg", "collect rule fired", "name", r.Name, "labels", ls) + } + } + } + for k, _ := range pendings { + delete((*pendingRules[r.Name]).triggerTime, k) + } + for k, _ := range firings { + if (*firingRules[r.Name]).resolveTime[k] == nil { + (*firingRules[r.Name]).resolveTime[k] = &now + } else if time.Since(*(*firingRules[r.Name]).resolveTime[k]) >= expireDuration { + delete((*firingRules[r.Name]).triggerTime, k) + delete((*firingRules[r.Name]).resolveTime, k) + delete(enabledMatches, k) + isUpdate = true + rlogger.Log(logger, rlogger.Info, "msg", "fired collect rule resolved", "name", r.Name) + } + } + return isUpdate +} + +func (e *Evaluator) evaluate(ctx context.Context) { + isUpdate := false + for _, r := range rules { + from := e.from + from.RawQuery = "" + v := e.from.Query() + v.Add("query", r.Expr) + from.RawQuery = v.Encode() + + req := &http.Request{Method: "GET", URL: from} + result, err := e.fromClient.RetrievRecordingMetrics(ctx, req, r.Name) + if err != nil { + rlogger.Log(e.logger, rlogger.Error, "msg", "failed to evaluate collect rule", "err", err, "rule", r.Expr) + continue + } else { + if evaluateRule(e.logger, r, result) { + isUpdate = true + } + } + } + if isUpdate { + config.Rules = getMatches() + + if len(config.Rules) == 0 { + if forwardWorker != nil && cancel != nil { + cancel() + forwardWorker = nil + rlogger.Log(e.logger, rlogger.Info, "msg", "forwarder stopped") + } + } else { + err := startWorker() + if err != nil { + rlogger.Log(e.logger, rlogger.Error, "msg", "failed to start forwarder to collect metrics", "error", err) + } else { + rlogger.Log(e.logger, rlogger.Info, "msg", "forwarder started/reconfigued to collect metrics") + } + } + } +} diff --git a/collectors/metrics/pkg/collectrule/evaluator_test.go b/collectors/metrics/pkg/collectrule/evaluator_test.go new file mode 100644 index 000000000..1ee7fd5ca --- /dev/null +++ b/collectors/metrics/pkg/collectrule/evaluator_test.go @@ -0,0 +1,265 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project +package collectrule + +import ( + "os" + "testing" + "time" + + "github.com/go-kit/log" + clientmodel "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/pkg/labels" +) + +const ( + TEST_RULE_NAME = "test_rule" +) + +func getTimePointer(d time.Duration) *time.Time { + testTime := time.Now().Add(-d) + return &testTime +} + +func getHash(k string, v string) uint64 { + ls := labels.Labels{} + ls = append(ls, labels.Label{ + Name: k, + Value: v, + }) + ls = append(ls, labels.Label{ + Name: "rule_name", + Value: TEST_RULE_NAME, + }) + return ls.Hash() +} + +func createMetricsFamiliy(k string, v string) []*clientmodel.MetricFamily { + metrics := []*clientmodel.Metric{ + { + Label: []*clientmodel.LabelPair{ + { + Name: &k, + Value: &v, + }, + }, + }, + } + families := []*clientmodel.MetricFamily{} + family := &clientmodel.MetricFamily{ + Metric: metrics, + } + families = append(families, family) + return families +} + +func getCollectRule(d time.Duration) CollectRule { + return CollectRule{ + Name: TEST_RULE_NAME, + Duration: d, + Names: []string{"name"}, + Matches: []string{`__name__="kube_resourcequota",namespace="{{ $labels.namespace }}"`}, + } +} + +func getEvaluatedRulesMap(h uint64, times ...*time.Time) map[string]*EvaluatedRule { + triggerTime := map[uint64]*time.Time{} + if len(times) >= 1 { + triggerTime[h] = times[0] + } + resolveTime := map[uint64]*time.Time{} + if len(times) >= 2 { + resolveTime[h] = times[1] + } + return map[string]*EvaluatedRule{ + TEST_RULE_NAME: { + triggerTime: triggerTime, + resolveTime: resolveTime, + }, + } +} + +func getEnabledMatches() map[uint64][]string { + return map[uint64][]string{ + getHash("namespace", "test"): { + `{__name__="name"}`, + `{__name__="kube_resourcequota",namespace="test"}`, + }, + } +} + +func TestEvaluateRule(t *testing.T) { + caseList := []struct { + name string + rule CollectRule + metrics []*clientmodel.MetricFamily + pendingRules map[string]*EvaluatedRule + firingRules map[string]*EvaluatedRule + enabledMatches map[uint64][]string + isUpdate bool + pendingSize int + pendingHash uint64 + firingSize int + firingHash uint64 + enabledMatchesSize int + enabledMatchesHash uint64 + hasMatchOne bool + MatchOne string + hasMatchTwo bool + MatchTwo string + firingResolveSize int + }{ + { + name: "first trigger", + rule: getCollectRule(10 * time.Minute), + metrics: createMetricsFamiliy("namespace", "test"), + pendingRules: getEvaluatedRulesMap(0), + firingRules: getEvaluatedRulesMap(0), + enabledMatches: map[uint64][]string{}, + isUpdate: false, + pendingSize: 1, + pendingHash: getHash("namespace", "test"), + }, + { + name: "first trigger and fire", + rule: getCollectRule(0), + metrics: createMetricsFamiliy("namespace", "test"), + pendingRules: getEvaluatedRulesMap(0), + firingRules: getEvaluatedRulesMap(0), + enabledMatches: map[uint64][]string{}, + isUpdate: true, + firingSize: 1, + firingHash: getHash("namespace", "test"), + enabledMatchesSize: 1, + enabledMatchesHash: getHash("namespace", "test"), + hasMatchOne: true, + MatchOne: `{__name__="name"}`, + hasMatchTwo: true, + MatchTwo: `{__name__="kube_resourcequota",namespace="test"}`, + }, + { + name: "second trigger", + rule: getCollectRule(10 * time.Minute), + metrics: createMetricsFamiliy("namespace", "test"), + pendingRules: getEvaluatedRulesMap(getHash("namespace", "test"), getTimePointer(5*time.Minute)), + firingRules: getEvaluatedRulesMap(0), + enabledMatches: map[uint64][]string{}, + 
isUpdate: false, + pendingSize: 1, + pendingHash: getHash("namespace", "test"), + }, + { + name: "second trigger and fire", + rule: getCollectRule(1 * time.Minute), + metrics: createMetricsFamiliy("namespace", "test"), + pendingRules: getEvaluatedRulesMap(getHash("namespace", "test"), getTimePointer(5*time.Minute)), + firingRules: getEvaluatedRulesMap(0), + enabledMatches: map[uint64][]string{}, + isUpdate: true, + firingSize: 1, + firingHash: getHash("namespace", "test"), + enabledMatchesSize: 1, + enabledMatchesHash: getHash("namespace", "test"), + hasMatchOne: true, + MatchOne: `{__name__="name"}`, + hasMatchTwo: true, + MatchTwo: `{__name__="kube_resourcequota",namespace="test"}`, + }, + { + name: "new trigger remove old resolve", + rule: getCollectRule(10 * time.Minute), + metrics: createMetricsFamiliy("namespace", "test"), + pendingRules: getEvaluatedRulesMap(0), + firingRules: getEvaluatedRulesMap(getHash("namespace", "test"), getTimePointer(2*time.Minute), getTimePointer(3*time.Minute)), + enabledMatches: getEnabledMatches(), + isUpdate: false, + firingSize: 1, + firingHash: getHash("namespace", "test"), + enabledMatchesSize: 1, + enabledMatchesHash: getHash("namespace", "test"), + hasMatchOne: true, + MatchOne: `{__name__="name"}`, + hasMatchTwo: true, + MatchTwo: `{__name__="kube_resourcequota",namespace="test"}`, + firingResolveSize: 0, + }, + { + name: "new trigger remove pending", + rule: getCollectRule(10 * time.Minute), + metrics: createMetricsFamiliy("namespace", "another_test"), + pendingRules: getEvaluatedRulesMap(getHash("namespace", "test"), getTimePointer(5*time.Minute)), + firingRules: getEvaluatedRulesMap(0), + enabledMatches: map[uint64][]string{}, + isUpdate: false, + pendingSize: 1, + pendingHash: getHash("namespace", "another_test"), + }, + { + name: "new trigger mark firing resolve", + rule: getCollectRule(10 * time.Minute), + metrics: createMetricsFamiliy("namespace", "another_test"), + pendingRules: getEvaluatedRulesMap(0), + firingRules: getEvaluatedRulesMap(getHash("namespace", "test"), getTimePointer(5*time.Minute)), + enabledMatches: getEnabledMatches(), + isUpdate: false, + pendingSize: 1, + pendingHash: getHash("namespace", "another_test"), + firingSize: 1, + firingHash: getHash("namespace", "test"), + enabledMatchesSize: 1, + enabledMatchesHash: getHash("namespace", "test"), + hasMatchOne: true, + MatchOne: `{__name__="name"}`, + hasMatchTwo: true, + MatchTwo: `{__name__="kube_resourcequota",namespace="test"}`, + firingResolveSize: 1, + }, + { + name: "new trigger remove the firing", + rule: getCollectRule(10 * time.Minute), + metrics: createMetricsFamiliy("namespace", "another_test"), + pendingRules: getEvaluatedRulesMap(0), + firingRules: getEvaluatedRulesMap(getHash("namespace", "test"), getTimePointer(25*time.Minute), getTimePointer(20*time.Minute)), + enabledMatches: getEnabledMatches(), + isUpdate: true, + pendingSize: 1, + pendingHash: getHash("namespace", "another_test"), + }, + } + + logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + pendingRules = c.pendingRules + firingRules = c.firingRules + enabledMatches = c.enabledMatches + isUpdate := evaluateRule(logger, c.rule, c.metrics) + if isUpdate != c.isUpdate { + t.Errorf("case (%v) isUpdate: (%v) is not the expected: (%v)", c.name, isUpdate, + c.isUpdate) + } else if c.pendingSize != len(pendingRules[TEST_RULE_NAME].triggerTime) { + t.Errorf("case (%v) pendingRules size: (%v) is not the expected: (%v)", c.name, 
len(pendingRules[TEST_RULE_NAME].triggerTime), + c.pendingSize) + } else if c.pendingSize > 0 && pendingRules[TEST_RULE_NAME].triggerTime[c.pendingHash] == nil { + t.Errorf("case (%v) pendingRules has no key: (%v)", c.name, c.pendingHash) + } else if c.firingSize != len(firingRules[TEST_RULE_NAME].triggerTime) { + t.Errorf("case (%v) firingRules size: (%v) is not the expected: (%v)", c.name, len(firingRules[TEST_RULE_NAME].triggerTime), + c.firingSize) + } else if c.firingSize > 0 && firingRules[TEST_RULE_NAME].triggerTime[c.firingHash] == nil { + t.Errorf("case (%v) firingRules has no key: (%v)", c.name, c.firingHash) + } else if c.enabledMatchesSize != len(enabledMatches) { + t.Errorf("case (%v) enabledMatches size: (%v) is not the expected: (%v)", c.name, len(enabledMatches), + c.enabledMatchesSize) + } else if c.enabledMatchesSize > 0 && enabledMatches[c.firingHash] == nil { + t.Errorf("case (%v) enabledMatches has no key: (%v)", c.name, c.enabledMatchesHash) + } else if c.hasMatchOne && c.MatchOne != enabledMatches[c.firingHash][0] { + t.Errorf("case (%v) enabledMatches first match: (%v) is not the expected: (%v)", c.name, enabledMatches[c.firingHash][0], c.MatchOne) + } else if c.hasMatchTwo && c.MatchTwo != enabledMatches[c.firingHash][1] { + t.Errorf("case (%v) enabledMatches second match: (%v) is not the expected: (%v)", c.name, enabledMatches[c.firingHash][1], c.MatchTwo) + } else if c.firingResolveSize != len(firingRules[TEST_RULE_NAME].resolveTime) { + t.Errorf("case (%v) firingRules resolveTime size: (%d) is not the expected: (%d)", c.name, len(firingRules[TEST_RULE_NAME].resolveTime), + c.firingResolveSize) + } + }) + } +} diff --git a/collectors/metrics/pkg/forwarder/forwarder.go b/collectors/metrics/pkg/forwarder/forwarder.go index ddf1842c9..4e9959089 100644 --- a/collectors/metrics/pkg/forwarder/forwarder.go +++ b/collectors/metrics/pkg/forwarder/forwarder.go @@ -67,17 +67,24 @@ type Config struct { FromToken string FromTokenFile string FromCAFile string - - AnonymizeLabels []string - AnonymizeSalt string - AnonymizeSaltFile string - Debug bool - Interval time.Duration - LimitBytes int64 - Rules []string - RecordingRules []string - RulesFile string - Transformer metricfamily.Transformer + ToUploadCA string + ToUploadCert string + ToUploadKey string + + AnonymizeLabels []string + AnonymizeSalt string + AnonymizeSaltFile string + Debug bool + Interval time.Duration + EvaluateInterval time.Duration + LimitBytes int64 + Rules []string + RulesFile string + RecordingRules []string + RecordingRulesFile string + CollectRules []string + CollectRulesFile string + Transformer metricfamily.Transformer Logger log.Logger SimulatedTimeseriesFile string @@ -108,35 +115,8 @@ type Worker struct { status status.StatusReport } -func createClients(cfg Config, interval time.Duration, - logger log.Logger) (*metricsclient.Client, *metricsclient.Client, metricfamily.MultiTransformer, error) { - - var transformer metricfamily.MultiTransformer - - // Configure the anonymization. 
- anonymizeSalt := cfg.AnonymizeSalt - if len(cfg.AnonymizeSalt) == 0 && len(cfg.AnonymizeSaltFile) > 0 { - data, err := ioutil.ReadFile(cfg.AnonymizeSaltFile) - if err != nil { - return nil, nil, transformer, fmt.Errorf("failed to read anonymize-salt-file: %v", err) - } - anonymizeSalt = strings.TrimSpace(string(data)) - } - if len(cfg.AnonymizeLabels) != 0 && len(anonymizeSalt) == 0 { - return nil, nil, transformer, fmt.Errorf("anonymize-salt must be specified if anonymize-labels is set") - } - if len(cfg.AnonymizeLabels) == 0 { - rlogger.Log(logger, rlogger.Warn, "msg", "not anonymizing any labels") - } - - // Configure a transformer. - if cfg.Transformer != nil { - transformer.With(cfg.Transformer) - } - if len(cfg.AnonymizeLabels) > 0 { - transformer.With(metricfamily.NewMetricsAnonymizer(anonymizeSalt, cfg.AnonymizeLabels, nil)) - } - +func CreateFromClient(cfg Config, interval time.Duration, name string, + logger log.Logger) (*metricsclient.Client, error) { fromTransport := metricsclient.DefaultTransport(logger, false) if len(cfg.FromCAFile) > 0 { if fromTransport.TLSClientConfig == nil { @@ -146,11 +126,11 @@ func createClients(cfg Config, interval time.Duration, } pool, err := x509.SystemCertPool() if err != nil { - return nil, nil, transformer, fmt.Errorf("failed to read system certificates: %v", err) + return nil, fmt.Errorf("failed to read system certificates: %v", err) } data, err := ioutil.ReadFile(cfg.FromCAFile) if err != nil { - return nil, nil, transformer, fmt.Errorf("failed to read from-ca-file: %v", err) + return nil, fmt.Errorf("failed to read from-ca-file: %v", err) } if !pool.AppendCertsFromPEM(data) { rlogger.Log(logger, rlogger.Warn, "msg", "no certs found in from-ca-file") @@ -174,18 +154,55 @@ func createClients(cfg Config, interval time.Duration, if len(cfg.FromToken) == 0 && len(cfg.FromTokenFile) > 0 { data, err := ioutil.ReadFile(cfg.FromTokenFile) if err != nil { - return nil, nil, transformer, fmt.Errorf("unable to read from-token-file: %v", err) + return nil, fmt.Errorf("unable to read from-token-file: %v", err) } cfg.FromToken = strings.TrimSpace(string(data)) } if len(cfg.FromToken) > 0 { fromClient.Transport = metricshttp.NewBearerRoundTripper(cfg.FromToken, fromClient.Transport) } + from := metricsclient.New(logger, fromClient, cfg.LimitBytes, interval, "federate_from") + return from, nil +} + +func createClients(cfg Config, interval time.Duration, + logger log.Logger) (*metricsclient.Client, *metricsclient.Client, metricfamily.MultiTransformer, error) { + + var transformer metricfamily.MultiTransformer + + // Configure the anonymization. + anonymizeSalt := cfg.AnonymizeSalt + if len(cfg.AnonymizeSalt) == 0 && len(cfg.AnonymizeSaltFile) > 0 { + data, err := ioutil.ReadFile(cfg.AnonymizeSaltFile) + if err != nil { + return nil, nil, transformer, fmt.Errorf("failed to read anonymize-salt-file: %v", err) + } + anonymizeSalt = strings.TrimSpace(string(data)) + } + if len(cfg.AnonymizeLabels) != 0 && len(anonymizeSalt) == 0 { + return nil, nil, transformer, fmt.Errorf("anonymize-salt must be specified if anonymize-labels is set") + } + if len(cfg.AnonymizeLabels) == 0 { + rlogger.Log(logger, rlogger.Warn, "msg", "not anonymizing any labels") + } + + // Configure a transformer. 
+ if cfg.Transformer != nil { + transformer.With(cfg.Transformer) + } + if len(cfg.AnonymizeLabels) > 0 { + transformer.With(metricfamily.NewMetricsAnonymizer(anonymizeSalt, cfg.AnonymizeLabels, nil)) + } + from, err := CreateFromClient(cfg, interval, "federate_from", logger) + if err != nil { + return nil, nil, transformer, err + } + // Create the `toClient`. - toTransport, err := metricsclient.MTLSTransport(logger) + toTransport, err := metricsclient.MTLSTransport(logger, cfg.ToUploadCA, cfg.ToUploadCert, cfg.ToUploadKey) if err != nil { return nil, nil, transformer, errors.New(err.Error()) } diff --git a/collectors/metrics/pkg/metricfamily/label.go b/collectors/metrics/pkg/metricfamily/label.go index c39bf9787..cf5507752 100644 --- a/collectors/metrics/pkg/metricfamily/label.go +++ b/collectors/metrics/pkg/metricfamily/label.go @@ -49,7 +49,9 @@ func (t *label) Transform(family *clientmodel.MetricFamily) (bool, error) { return true, nil } -func appendLabels(existing []*clientmodel.LabelPair, overrides map[string]*clientmodel.LabelPair) []*clientmodel.LabelPair { +func appendLabels( + existing []*clientmodel.LabelPair, + overrides map[string]*clientmodel.LabelPair) []*clientmodel.LabelPair { var found []string for i, pair := range existing { name := pair.GetName() diff --git a/collectors/metrics/pkg/metricfamily/required.go b/collectors/metrics/pkg/metricfamily/required.go index 94afa61b5..3ea01b353 100644 --- a/collectors/metrics/pkg/metricfamily/required.go +++ b/collectors/metrics/pkg/metricfamily/required.go @@ -31,7 +31,12 @@ func (t requireLabel) Transform(family *clientmodel.MetricFamily) (bool, error) } if label.GetName() == k { if label.GetValue() != v { - return false, fmt.Errorf("expected label %s to have value %s instead of %s", label.GetName(), v, label.GetValue()) + return false, fmt.Errorf( + "expected label %s to have value %s instead of %s", + label.GetName(), + v, + label.GetValue(), + ) } continue Metrics } diff --git a/collectors/metrics/pkg/metricsclient/metricsclient.go b/collectors/metrics/pkg/metricsclient/metricsclient.go index e62f0743c..62a601404 100644 --- a/collectors/metrics/pkg/metricsclient/metricsclient.go +++ b/collectors/metrics/pkg/metricsclient/metricsclient.go @@ -14,6 +14,7 @@ import ( "net" "net/http" "os" + "path/filepath" "strconv" "strings" "time" @@ -93,7 +94,10 @@ type MetricsResult struct { Value []interface{} `json:"value"` } -func (c *Client) RetrievRecordingMetrics(ctx context.Context, req *http.Request, name string) ([]*clientmodel.MetricFamily, error) { +func (c *Client) RetrievRecordingMetrics( + ctx context.Context, + req *http.Request, + name string) ([]*clientmodel.MetricFamily, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) req = req.WithContext(ctx) @@ -366,18 +370,15 @@ func withCancel(ctx context.Context, client *http.Client, req *http.Request, fn return err } -func MTLSTransport(logger log.Logger) (*http.Transport, error) { +func MTLSTransport(logger log.Logger, caCertFile, tlsCrtFile, tlsKeyFile string) (*http.Transport, error) { testMode := os.Getenv("UNIT_TEST") != "" - caCertFile := "/tlscerts/ca/ca.crt" - tlsKeyFile := "/tlscerts/certs/tls.key" - tlsCrtFile := "/tlscerts/certs/tls.crt" if testMode { caCertFile = "../../testdata/tls/ca.crt" tlsKeyFile = "../../testdata/tls/tls.key" tlsCrtFile = "../../testdata/tls/tls.crt" } // Load Server CA cert - caCert, err := ioutil.ReadFile(caCertFile) + caCert, err := ioutil.ReadFile(filepath.Clean(caCertFile)) if err != nil { return nil, errors.Wrap(err, "failed to load 
server ca cert file") } diff --git a/collectors/metrics/test/integration/manifests/deployment.yaml b/collectors/metrics/test/integration/manifests/deployment.yaml index 8d4a245d6..f7c760ccc 100644 --- a/collectors/metrics/test/integration/manifests/deployment.yaml +++ b/collectors/metrics/test/integration/manifests/deployment.yaml @@ -104,7 +104,7 @@ spec: - name: HTTPS_PROXY - name: NO_PROXY image: {{ METRICS_COLLECTOR_IMAGE }} - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: metrics-collector ports: - containerPort: 8080 diff --git a/collectors/metrics/test/integration/manifests/observatorium-api.yaml b/collectors/metrics/test/integration/manifests/observatorium-api.yaml index 06186d02e..81d2f15cf 100644 --- a/collectors/metrics/test/integration/manifests/observatorium-api.yaml +++ b/collectors/metrics/test/integration/manifests/observatorium-api.yaml @@ -48,7 +48,7 @@ spec: - --tls.server.key-file=/etc/observatorium2/server.key - --tls.healthchecks.server-ca-file=/etc/observatorium2/server-ca.pem image: quay.io/observatorium/observatorium:latest - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: observatorium-api ports: - containerPort: 8081 diff --git a/docs/MultiClusterObservability-CRD.md b/docs/MultiClusterObservability-CRD.md index d47e26332..bab5e5091 100644 --- a/docs/MultiClusterObservability-CRD.md +++ b/docs/MultiClusterObservability-CRD.md @@ -42,7 +42,7 @@ Note: Disabling downsampling is not recommended as querying long time ranges wit corev1.PullPolicy - Pull policy of the MultiClusterObservability images. The default is Always. + Pull policy of the MultiClusterObservability images. The default is IfNotPresent. N @@ -225,7 +225,7 @@ The default is 10Gi string - The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. 
y diff --git a/docs/setup-ceph-for-object-storage.md b/docs/setup-ceph-for-object-storage.md index 630563be7..67d59d074 100644 --- a/docs/setup-ceph-for-object-storage.md +++ b/docs/setup-ceph-for-object-storage.md @@ -140,7 +140,7 @@ CDDQ0YU1C4A77A0GE54S ``` $ SECRET_KEY=$(oc -n rook-ceph get secret rook-ceph-object-user-object-object -o yaml | grep SecretKey | awk '{print $2}' | base64 --decode) $ echo $SECRET_KEY - +awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI ``` ### Expose Object Store externally @@ -168,7 +168,7 @@ AWS_ENDPOINT: `oc get service rook-ceph-rgw-object -n rook-ceph` and use `CLUSTE ``` [root@rook-ceph-tools /]# export AWS_ACCESS_KEY_ID=CDDQ0YU1C4A77A0GE54S -[root@rook-ceph-tools /]# export AWS_SECRET_ACCESS_KEY= +[root@rook-ceph-tools /]# export AWS_SECRET_ACCESS_KEY=awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI [root@rook-ceph-tools /]# export AWS_HOST=rook-ceph-rgw-object:8081 [root@rook-ceph-tools /]# export AWS_ENDPOINT=172.30.162.20:8081 ``` @@ -195,7 +195,7 @@ config: endpoint: rook-ceph-rgw-object-rook-ceph.apps.acm-hub.dev05.red-chesterfield.com insecure: true access_key: CDDQ0YU1C4A77A0GE54S - secret_key: + secret_key: awkEbItAs6OXsbOC6Qk7SX45h01GSw51z9SDasBI ``` ### Proceed with installation of ACM Observbility diff --git a/examples/alerts/watchdog_rule/kustomization.yaml b/examples/alerts/watchdog_rule/kustomization.yaml new file mode 100644 index 000000000..db9176867 --- /dev/null +++ b/examples/alerts/watchdog_rule/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- watchdog-rule.yaml diff --git a/examples/alerts/watchdog_rule/watchdog-rule.yaml b/examples/alerts/watchdog_rule/watchdog-rule.yaml new file mode 100644 index 000000000..57bdb28fe --- /dev/null +++ b/examples/alerts/watchdog_rule/watchdog-rule.yaml @@ -0,0 +1,22 @@ +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: watchdog-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: watchdog-rules + rules: + - alert: Watchdog + annotations: + description: | + This is an alert meant to ensure that the entire alerting pipeline is functional. + This alert is always firing, therefore it should always be firing in Alertmanager + and always fire against a receiver. There are integrations with various notification + mechanisms that send a notification when this alert is not firing. For example the + "DeadMansSnitch" integration in PagerDuty. + summary: An alert that should always be firing to certify that Alertmanager + is working properly. + expr: vector(1) + labels: + severity: none diff --git a/examples/dashboards/alert-dashboards/README.md b/examples/dashboards/alert-dashboards/README.md new file mode 100644 index 000000000..ff2b9f3fa --- /dev/null +++ b/examples/dashboards/alert-dashboards/README.md @@ -0,0 +1,11 @@ +# Alert Dashboards + +Included in this pack are 3 experimental dashboards meant to give an overview of Alerts: +- Alert Analysis - the overview dashboard containing both current and historical status with drill-downs into the dashboards shown below. +- Clusters by Alert - choose alerts and see the clusters affected over time. +- Alerts by Cluster - choose a cluster and see alerts firing on that cluster over time. + +Known Limitations: +1. These dashboards work well if used with ACM 2.4 where the Grafana Version is 8.*. +1. These dashboards are not aware of alerts that may have been suppressed in the Alertmanager configuration of ACM. +1. There is one alert: ViolatedPolicyReport that appears without a cluster name in the dashboards.
This will be addressed soon. diff --git a/examples/dashboards/alert-dashboards/alert-analysis.yaml b/examples/dashboards/alert-dashboards/alert-analysis.yaml new file mode 100644 index 000000000..aaa324d17 --- /dev/null +++ b/examples/dashboards/alert-dashboards/alert-analysis.yaml @@ -0,0 +1,743 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-custom-alert-analysis + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: 'true' +data: + alert-analysis.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 8, + "iteration": 1643209795645, + "links": [], + "panels": [ + { + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 20, + "title": "Real Time Data", + "type": "row" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 13, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Total Alerts", + "type": "stat" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto", + "filterable": true + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "cluster" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "title": "Drill down to Alerts on this Cluster", + "url": "/d/7GMQIsJnz/alerts-by-cluster?orgId=1&var-cluster=${__data.fields.cluster}&from=now-1h&to=now" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "alertname" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "title": "Drill down to Clusters with this Alert", + "url": "/d/UZJv0TJnz/clusters-by-alert?orgId=1&var-alert=${__data.fields.alertname}" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 4, + "y": 1 + }, + "id": 22, + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Value" + } + ] + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum (ALERTS{alertstate=\"firing\"}) by (cluster,alertname,severity)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Alerts and Clusters", + "type": "table" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 16, + "y": 1 + }, + "id": 14, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\",severity=\"critical\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Total Critical Alerts", + "type": "stat" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 16, + "y": 5 + }, + "id": 16, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\",severity=\"warning\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Total Warning Alerts", + "type": "stat" + }, + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 16, + "y": 9 + }, + "id": 15, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\",severity=\"moderate\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Total Moderate Alerts", + "type": "stat" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 70, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + 
"mode": "single" + } + }, + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\",severity=~\"$severity\"}) by (alertname)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "AlertType Over Time", + "type": "timeseries" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 35, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\", cluster!=\"\", severity=~\"$severity\"}) by (cluster)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Cluster Affected Over Time", + "type": "timeseries" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 18, + "panels": [ + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "displayName": "${__series.name}", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 23 + }, + "id": 10, + "options": { + "displayMode": "basic", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(sum_over_time(ALERTS{alertstate=\"firing\",severity=~\"$severity\"}[$__range])) by (alertname)", + "interval": "", + "legendFormat": "{{alertname}}", + "refId": "A" + } + ], + "title": "Most Firing Alerts", + "transformations": [], + "transparent": true, + "type": "bargauge" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 23 + }, + "id": 11, + "options": { + "displayMode": "basic", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(sum_over_time(ALERTS{alertstate=\"firing\", 
cluster!=\"\",severity=~\"$severity\"}[$__range])) by (cluster)", + "interval": "", + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "title": "Most Affected Clusters", + "transparent": true, + "type": "bargauge" + } + ], + "title": "Historical Analysis", + "type": "row" + } + ], + "refresh": "", + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "info", + "moderate", + "none", + "warning", + "critical" + ], + "value": [ + "info", + "moderate", + "none", + "warning", + "critical" + ] + }, + "datasource": null, + "definition": "label_values(ALERTS, severity)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": true, + "name": "severity", + "options": [], + "query": { + "query": "label_values(ALERTS, severity)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Alert Analysis", + "uid": "w1V3PRTnk", + "version": 8 + } diff --git a/examples/dashboards/alert-dashboards/alerts-by-cluster.yaml b/examples/dashboards/alert-dashboards/alerts-by-cluster.yaml new file mode 100644 index 000000000..b0481dab0 --- /dev/null +++ b/examples/dashboards/alert-dashboards/alerts-by-cluster.yaml @@ -0,0 +1,178 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-custom-alerts-by-cluster + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: 'true' +data: + alerts-by-cluster.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 26, + "iteration": 1642692464762, + "links": [], + "panels": [ + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 70, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\", cluster=\"$cluster\"}) by (alertname)", + "hide": false, + "interval": "", + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "title": "${cluster} - Alerts over time", + "type": "timeseries" + } + ], + "schemaVersion": 30, + "style": "dark", + "tags": [], + 
"templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "cicd-aap-aas-ansible-a-eastus", + "value": "cicd-aap-aas-ansible-a-eastus" + }, + "datasource": null, + "definition": "label_values(ALERTS, cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(ALERTS, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 5, + "type": "query" + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Alerts by Cluster", + "uid": "7GMQIsJnz", + "version": 8 + } \ No newline at end of file diff --git a/examples/dashboards/alert-dashboards/clusters-by-alert.yaml b/examples/dashboards/alert-dashboards/clusters-by-alert.yaml new file mode 100644 index 000000000..e94d0bfdf --- /dev/null +++ b/examples/dashboards/alert-dashboards/clusters-by-alert.yaml @@ -0,0 +1,180 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-custom-clusters-by-alert + namespace: open-cluster-management-observability + labels: + grafana-custom-dashboard: 'true' +data: + clusters-by-alert.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 25, + "iteration": 1642692627538, + "links": [], + "panels": [ + { + "datasource": null, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 70, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(ALERTS{alertstate=\"firing\", alertname=\"$alert\"}) by (cluster)", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{cluster}}", + "refId": "A" + } + ], + "title": "Clusters with Alert - ${alert} ", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "Watchdog", + "value": "Watchdog" + }, + "datasource": null, + "definition": "label_values(ALERTS, alertname)", + "description": null, + "error": 
null, + "hide": 0, + "includeAll": false, + "label": "Alert", + "multi": false, + "name": "alert", + "options": [], + "query": { + "query": "label_values(ALERTS, alertname)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 5, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Clusters by Alert", + "uid": "UZJv0TJnz", + "version": 18 + } \ No newline at end of file diff --git a/examples/dashboards/alert-dashboards/kustomization.yaml b/examples/dashboards/alert-dashboards/kustomization.yaml new file mode 100644 index 000000000..e6020e0b7 --- /dev/null +++ b/examples/dashboards/alert-dashboards/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- alert-analysis.yaml +- alerts-by-cluster.yaml +- clusters-by-alert.yaml diff --git a/examples/gen.go b/examples/gen.go new file mode 100644 index 000000000..3547aad53 --- /dev/null +++ b/examples/gen.go @@ -0,0 +1,119 @@ +// Copyright (c) 2022 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +How to generate minio certs: +1. run `go run gen.go` to re-generate certs. +2. 
run `oc create secret generic minio-tls-secret --from-file=ca.crt=./minio-tls/certs/ca.crt --from-file=public.crt=./minio-tls/certs/public.crt --from-file=private.key=./minio-tls/certs/private.key --dry-run='client' -oyaml --namespace=open-cluster-management-observability > ./minio-tls/minio-tls-secret.yaml` to generate minio-tls-secret.yaml +*/ + +package main + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "io/ioutil" + "math/big" + "net" + "os" + "time" + + "github.com/cloudflare/cfssl/log" +) + +func main() { + certPath := "./minio/certs/public.crt" + privkeyPath := "./minio/certs/private.key" + caPath := "./minio/certs/ca.crt" + serverName := "minio" + var caRoot = &x509.Certificate{ + SerialNumber: big.NewInt(2019), + NotAfter: time.Now().AddDate(10, 0, 0), + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + + var cert = &x509.Certificate{ + SerialNumber: big.NewInt(1658), + DNSNames: []string{serverName}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, + NotAfter: time.Now().AddDate(10, 0, 0), + SubjectKeyId: []byte{1, 2, 3}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature, + } + + caPrivKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Error(err) + os.Exit(1) + } + + certPrivKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + log.Error(err) + os.Exit(1) + } + // Generate CA cert. + caBytes, err := x509.CreateCertificate(rand.Reader, caRoot, caRoot, &caPrivKey.PublicKey, caPrivKey) + if err != nil { + log.Error(err) + os.Exit(1) + } + caPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + }) + err = ioutil.WriteFile(caPath, caPEM, 0600) + if err != nil { + log.Error(err) + os.Exit(1) + } + + // Sign the cert with the CA private key. 
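+ // The certificate template above includes SANs for the in-cluster service name ("minio")
+ // and the IPv4/IPv6 loopback addresses, so clients can verify the certificate whether they
+ // reach the endpoint through the Service DNS name or locally; it is issued by the CA generated above.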
+ certBytes, err := x509.CreateCertificate(rand.Reader, cert, caRoot, &certPrivKey.PublicKey, caPrivKey) + if err != nil { + log.Error(err) + os.Exit(1) + } + certPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + err = ioutil.WriteFile(certPath, certPEM, 0600) + if err != nil { + log.Error(err) + os.Exit(1) + } + + certPrivKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey), + }) + err = ioutil.WriteFile(privkeyPath, certPrivKeyPEM, 0600) + if err != nil { + log.Error(err) + os.Exit(1) + } + + os.Exit(0) +} diff --git a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml index 02f5f698e..d669314f3 100644 --- a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml +++ b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml @@ -9,7 +9,7 @@ spec: retentionResolution5m: 14d retentionResolutionRaw: 5d enableDownsampling: false - imagePullPolicy: Always + imagePullPolicy: IfNotPresent imagePullSecret: multiclusterhub-operator-pull-secret nodeSelector: kubernetes.io/os: linux diff --git a/examples/mco/e2e/v1beta2/custom-certs/kustomization.yaml b/examples/mco/e2e/v1beta2/custom-certs/kustomization.yaml new file mode 100644 index 000000000..f0c8a4650 --- /dev/null +++ b/examples/mco/e2e/v1beta2/custom-certs/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- observability.yaml diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml new file mode 100644 index 000000000..6137a66b6 --- /dev/null +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -0,0 +1,126 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability + annotations: +spec: + advanced: + retentionConfig: + blockDuration: 3h + cleanupInterval: 6m + deleteDelay: 50h + retentionInLocal: 5d + retentionResolution1h: 31d + retentionResolution5m: 15d + retentionResolutionRaw: 6d + observatoriumAPI: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + queryFrontend: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + query: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + compact: + resources: + limits: + cpu: 1 + memory: 2Gi + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + receive: + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + rule: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 1 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + store: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + storeMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 2 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + queryFrontendMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + grafana: + replicas: 3 + resources: + limits: + cpu: 1 + memory: 1Gi + alertmanager: + replicas: 2 + resources: + limits: + cpu: 100m + memory: 400Mi + rbacQueryProxy: + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + 
interval: 30 + resources: + limits: + cpu: 200m + memory: 700Mi + requests: + cpu: 10m + memory: 100Mi + storageConfig: + alertmanagerStorageSize: 1Gi + compactStorageSize: 1Gi + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + tlsSecretMountPath: /etc/minio/certs + tlsSecretName: minio-tls-secret + receiveStorageSize: 1Gi + ruleStorageSize: 1Gi + storageClass: gp2 + storeStorageSize: 1Gi diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 7497d4c36..ccac6d2bc 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -31,29 +31,39 @@ spec: cpu: 1 memory: 1Gi replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' compact: resources: limits: cpu: 1 memory: 2Gi + serviceAccountAnnotations: + test.com/role-arn: 's3_role' receive: resources: limits: cpu: 1 memory: 4Gi replicas: 2 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' rule: resources: limits: cpu: 1 memory: 1Gi replicas: 1 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' store: resources: limits: cpu: 1 memory: 2Gi replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' storeMemcached: resources: limits: diff --git a/examples/minio-tls/certs/ca.crt b/examples/minio-tls/certs/ca.crt new file mode 100644 index 000000000..172b102fb --- /dev/null +++ b/examples/minio-tls/certs/ca.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC3zCCAcegAwIBAgICB+MwDQYJKoZIhvcNAQELBQAwADAgGA8wMDAxMDEwMTAw +MDAwMFoXDTMyMDEyMDEyNTk1NVowADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMq13T+oqGSF+uwFvPXgkScafs7OD1iBWHOK+QIQAoVA+aL8uxmY20iW +dSH2LeZ1cqj+0Ob/7WJFJnnoSIvsg0w26CFScZ2WKT/n7DFUyAFqjCZbjMwggcvt +/viN2RnMvMmv+MJJDhLtW7F4wf2+reGzdY0RX98kW26l6PsFvgSylBANAFoF+/++ +Jp9VGCglcC3T7E8YQTTTqRvsIlzAHI2rWPVG93SGCj0BL87tGLZlqi8hKaBc5CHg +G0Jj8frgGClaLuX+uSsXXH5ZcvKTJvcxlUfpOV1PwXrI1uFI+kcf4DAdXT1g85yI +gWumH0UyG7SH8OoFoxCoSUXJ5adZrFUCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKE +MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBSXQwxG7Fmci4znvtv0c2MiXObRNzANBgkqhkiG9w0BAQsFAAOC +AQEAANagkqcD8RGoXoMsAjcECz3hpnv2MKOpUhnDMJKQf/iTy2G0YgyHERx5xCHm +tyHHeFq440126+PTV3CnIIobnbCtWGIcTloIHR4cJC0Er6Xqot0RE7XtESPHfo0v +/8/M9ad7uOBO8KpM1gNY4tPbY4fenquXeUdelUV5xEkIAOpAPeEJq1gPSbSh2vTP +Q/Cq/N8CH9zSd0cgQ3KkUI2Uyaq3HB8yR1zU6f6nfuKJQqKEMpUeeoRXWW5g/Iio +2auIoUx2Cpl4k01EXqKILOQG5o382svxSyHu4oHLRjS2XLIdcgyMYH68wrLKrk5m +zsvDxoSfQnTOcUyr0gZ6UQlGTg== +-----END CERTIFICATE----- diff --git a/examples/minio-tls/certs/private.key b/examples/minio-tls/certs/private.key new file mode 100644 index 000000000..411c967dd --- /dev/null +++ b/examples/minio-tls/certs/private.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAynowkn5VZMvKx8il10xKBfTXjgVC/iTJM+WKaAIGRSHyNOEA +mdw3Td14Gi/GmDHuh7ZdzEaznDygJemK4UkVCJFQonGIgGScyCz8A7/CLx1xj1pj +XAiQiTtLBKJIZ8IlOFDT/i0QWFEWxGU7M/2dYveOK/Gk5yqDDvE7LKFppsRmh5Mp +BRpqZOH2doLPNjWGe/QxTsqpssCJHo13AwYl6Gw0ka2cTWi8j1CM9FRbE15R6zgY +HcCPyyacfm61zwk0Y9Z91kCkte9fzVTl7A0A42txdv9MW9PWIlLMQYYT5XYopa7d +AkVSmIALQmQU/vAo99DyMYbwLCqdezFnfzsW4QIDAQABAoIBAQCw86adcHE6vOvE +O/PLu+15VShJAwowR3/brgCOsy49ntQpKHa5AH8IusOqxxQdkI2w500EKMnxgf5I +A9mO16rdKPo6bVRpOQTX4JCCykRvnqDPMqRhG08Nz9bY/uaeX0rd863BliqRtJRc +IUhjQIMaqdUDTJTcPgf5uBMKByrpRW2c5jgU1frOYlUM5HViK3cI27Cr46EnyzHp +K3zWuyyJ9MHgxaMurKZa7bY1KuBoR6+PT+TuAMk4XAbxtwj4L7BIWOqIyz/IWVXu +txhT0PSbCLNG2Z8Uf2dLqbTNCOjbdl86Igd2E2AN0TdtOH+SUHgnKIikRDZdZjBc 
+M4X3pgYdAoGBAPtXHCyjw1Rn1VDS4wJlgilo2LyOvho/unqSNJU/36/N04YcyiAE +J9+jXchMTk6Xy/vlqhbaEElkzcFbjGyYDmgDrd45k51TnWycR8kt/MvJTJC+t8F5 +Hkl6TtKCVtp1tdvMVrMaxYTxKbYVEkRQrkYdNM8wh2lJDsvjVaSBftpDAoGBAM47 +K55UTm4EUihEv7Uvj51B7+kEWBxihknQeNcGcy1tNBMVkPZYyB+yJFBVeRhoNWhv +zRIhADNWBdSkm2l1Ou+YrmDy2PaLTnClWgxkjplEamnrDiglTPAY203Uz1wHskV0 +NCT8uNA1nmw/WuKRsi8yjUxgQZup892Pq6iNZRILAoGBANh5uJyCF/SU0dehK0Cf +KTJ9SV9uGjIFfZmgX4XE0+ZZgrvaBAT7T1SBIn8nqbJrb5dvwcWY37o3Bjhpn1LQ +zGet84yZTpiMseQf3nxaDYd/w2BPw3jx1rI2k7kf5aqXA6FiJ7JvdnwAMorPL0KD +svZUpW1mQNrfv8y/6r2bgyR9AoGBAMVtKbWZubdilZANOIVocZC3NceZrAwIWcxC +MdyPRSXAvDBk3fCnT3XgYRkyqgmugSpolSX8x6dde5l0t36+hlwmixEuinuDIuV9 +tfXVMhoRv9mD93qBMuVKtm8NAjF1ALmyo9SgxwAzxGSWRhhOKAk82L28DDLMATQi +vM9QBOi/AoGBALz6q3DQeHAw89ZI/7gW+DWriv8hLeZ0Q0ESuWUkOK+tsMyZVw/c +pjEgqziulXL+Zr9JZrTWBGZcYCoQ4r6vpNxqVPVPXLNA8qglEN8TW5m9veevpZfj +0MYTV2ue4NqOkM1/J5M+9WoykhnHP/8L0N8+Ut6AwyUutaZCVA8Joyv0 +-----END RSA PRIVATE KEY----- diff --git a/examples/minio-tls/certs/public.crt b/examples/minio-tls/certs/public.crt new file mode 100644 index 000000000..8ccca936a --- /dev/null +++ b/examples/minio-tls/certs/public.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC6jCCAdKgAwIBAgICBnowDQYJKoZIhvcNAQELBQAwADAgGA8wMDAxMDEwMTAw +MDAwMFoXDTMyMDEyMDEyNTk1NVowADCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAMp6MJJ+VWTLysfIpddMSgX0144FQv4kyTPlimgCBkUh8jThAJncN03d +eBovxpgx7oe2XcxGs5w8oCXpiuFJFQiRUKJxiIBknMgs/AO/wi8dcY9aY1wIkIk7 +SwSiSGfCJThQ0/4tEFhRFsRlOzP9nWL3jivxpOcqgw7xOyyhaabEZoeTKQUaamTh +9naCzzY1hnv0MU7KqbLAiR6NdwMGJehsNJGtnE1ovI9QjPRUWxNeUes4GB3Aj8sm +nH5utc8JNGPWfdZApLXvX81U5ewNAONrcXb/TFvT1iJSzEGGE+V2KKWu3QJFUpiA +C0JkFP7wKPfQ8jGG8CwqnXsxZ387FuECAwEAAaNsMGowDgYDVR0PAQH/BAQDAgeA +MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAMBgNVHQ4EBQQDAQIDMCsG +A1UdEQEB/wQhMB+CBW1pbmlvhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqG +SIb3DQEBCwUAA4IBAQAxLHWZrUicEWXRJLwTiofQ3Q1G+GuCCE2CLGBmnXi7JUSX +7vVQT2DJs0Y1rjYjYNEnBdr/T86HhrFv8DSdvYycNtN6BZ6Mn/NWsp7bDMf2aHat +qMCN9GFTV4oIEvN00tJLVKcgHYbjbpkK5ShfryF2AXP9xKK52rvkbYbRAwEHsKXS +K565g8E0HFyu3yZbNrNmTg2fb8ODgOmRZuTclcapdnAx4IW3EMUziQJNottd3cTu +PzqS1W/Qata8BUeeGSw3jFdFe7R7xfadxKoVMbcZ7AlzavBpxC0ZbWEY4Tck3YIx +r4J029ijnyZ7M8ntO2FzH/H6+WYfIChKWkGZ7apu +-----END CERTIFICATE----- diff --git a/examples/minio-tls/kustomization.yaml b/examples/minio-tls/kustomization.yaml new file mode 100644 index 000000000..48044f89a --- /dev/null +++ b/examples/minio-tls/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- minio-deployment.yaml +- minio-pvc.yaml +- minio-tls-secret.yaml +- minio-service.yaml +- thanos-object-storage.yaml diff --git a/examples/minio-tls/minio-deployment.yaml b/examples/minio-tls/minio-deployment.yaml new file mode 100644 index 000000000..e804bd696 --- /dev/null +++ b/examples/minio-tls/minio-deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio + namespace: open-cluster-management-observability + labels: + app.kubernetes.io/name: minio +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: minio + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: minio + spec: + containers: + - command: + - /bin/sh + - -c + - mkdir -p /storage/thanos && /usr/bin/minio server --certs-dir /etc/minio/certs /storage + env: + - name: MINIO_ACCESS_KEY + value: minio + - name: MINIO_SECRET_KEY + value: minio123 + image: quay.io/minio/minio:RELEASE.2021-08-25T00-41-18Z + name: minio + ports: + - containerPort: 9000 + protocol: TCP + volumeMounts: + - mountPath: /storage + name: 
storage + - mountPath: /etc/minio/certs + name: tls-certs + volumes: + - name: storage + persistentVolumeClaim: + claimName: minio + - name: tls-certs + secret: + items: + - key: ca.crt + path: CAs/ca.crt + - key: public.crt + path: public.crt + - key: private.key + path: private.key + secretName: minio-tls-secret diff --git a/examples/minio-tls/minio-pvc.yaml b/examples/minio-tls/minio-pvc.yaml new file mode 100644 index 000000000..43ed946a7 --- /dev/null +++ b/examples/minio-tls/minio-pvc.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app.kubernetes.io/name: minio + name: minio + namespace: open-cluster-management-observability +spec: + storageClassName: gp2 + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" diff --git a/examples/minio-tls/minio-service.yaml b/examples/minio-tls/minio-service.yaml new file mode 100644 index 000000000..655a2ee84 --- /dev/null +++ b/examples/minio-tls/minio-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: minio + namespace: open-cluster-management-observability +spec: + ports: + - port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app.kubernetes.io/name: minio + type: ClusterIP diff --git a/examples/minio-tls/minio-tls-secret.yaml b/examples/minio-tls/minio-tls-secret.yaml new file mode 100644 index 000000000..1cb22ae36 --- /dev/null +++ b/examples/minio-tls/minio-tls-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMzekNDQWNlZ0F3SUJBZ0lDQitNd0RRWUpLb1pJaHZjTkFRRUxCUUF3QURBZ0dBOHdNREF4TURFd01UQXcKTURBd01Gb1hEVE15TURFeU1ERXlOVGsxTlZvd0FEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQwpBUW9DZ2dFQkFNcTEzVCtvcUdTRit1d0Z2UFhna1NjYWZzN09EMWlCV0hPSytRSVFBb1ZBK2FMOHV4bVkyMGlXCmRTSDJMZVoxY3FqKzBPYi83V0pGSm5ub1NJdnNnMHcyNkNGU2NaMldLVC9uN0RGVXlBRnFqQ1piak13Z2djdnQKL3ZpTjJSbk12TW12K01KSkRoTHRXN0Y0d2YyK3JlR3pkWTBSWDk4a1cyNmw2UHNGdmdTeWxCQU5BRm9GKy8rKwpKcDlWR0NnbGNDM1Q3RThZUVRUVHFSdnNJbHpBSEkycldQVkc5M1NHQ2owQkw4N3RHTFpscWk4aEthQmM1Q0hnCkcwSmo4ZnJnR0NsYUx1WCt1U3NYWEg1WmN2S1RKdmN4bFVmcE9WMVB3WHJJMXVGSStrY2Y0REFkWFQxZzg1eUkKZ1d1bUgwVXlHN1NIOE9vRm94Q29TVVhKNWFkWnJGVUNBd0VBQWFOaE1GOHdEZ1lEVlIwUEFRSC9CQVFEQWdLRQpNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01DQmdnckJnRUZCUWNEQVRBUEJnTlZIUk1CQWY4RUJUQURBUUgvCk1CMEdBMVVkRGdRV0JCU1hRd3hHN0ZtY2k0em52dHYwYzJNaVhPYlJOekFOQmdrcWhraUc5dzBCQVFzRkFBT0MKQVFFQUFOYWdrcWNEOFJHb1hvTXNBamNFQ3ozaHBudjJNS09wVWhuRE1KS1FmL2lUeTJHMFlneUhFUng1eENIbQp0eUhIZUZxNDQwMTI2K1BUVjNDbklJb2JuYkN0V0dJY1Rsb0lIUjRjSkMwRXI2WHFvdDBSRTdYdEVTUEhmbzB2Ci84L005YWQ3dU9CTzhLcE0xZ05ZNHRQYlk0ZmVucXVYZVVkZWxVVjV4RWtJQU9wQVBlRUpxMWdQU2JTaDJ2VFAKUS9DcS9OOENIOXpTZDBjZ1EzS2tVSTJVeWFxM0hCOHlSMXpVNmY2bmZ1S0pRcUtFTXBVZWVvUlhXVzVnL0lpbwoyYXVJb1V4MkNwbDRrMDFFWHFLSUxPUUc1bzM4MnN2eFN5SHU0b0hMUmpTMlhMSWRjZ3lNWUg2OHdyTEtyazVtCnpzdkR4b1NmUW5UT2NVeXIwZ1o2VVFsR1RnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + private.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBeW5vd2tuNVZaTXZLeDhpbDEweEtCZlRYamdWQy9pVEpNK1dLYUFJR1JTSHlOT0VBCm1kdzNUZDE0R2kvR21ESHVoN1pkekVhem5EeWdKZW1LNFVrVkNKRlFvbkdJZ0dTY3lDejhBNy9DTHgxeGoxcGoKWEFpUWlUdExCS0pJWjhJbE9GRFQvaTBRV0ZFV3hHVTdNLzJkWXZlT0svR2s1eXFERHZFN0xLRnBwc1JtaDVNcApCUnBxWk9IMmRvTFBOaldHZS9ReFRzcXBzc0NKSG8xM0F3WWw2R3cwa2EyY1RXaThqMUNNOUZSYkUxNVI2emdZCkhjQ1B5eWFjZm02MXp3azBZOVo5MWtDa3RlOWZ6VlRsN0EwQTQydHhkdjlNVzlQV0lsTE1RWVlUNVhZb3BhN2QKQWtWU21JQUxRbVFVL3ZBbzk5RHlNWWJ3TENxZGV6Rm5menNXNFFJREFRQUJBb0lCQVFDdzg2YWRjSEU2dk92RQpPL1BMdSsxNVZTaEpBd293UjMvYnJnQ09zeTQ5bnRRcEtIYTVBSDhJdXNPcXh4UWRrSTJ3NTAwRUtNbnhnZjVJCkE5bU8xNnJkS1BvNmJWUnBPUVRYNEpDQ3lrUnZucURQTXFSaEcwOE56OWJZL3VhZVgwcmQ4NjNCbGlxUnRKUmMKSVVoalFJTWFxZFVEVEpUY1BnZjV1Qk1LQnlycFJXMmM1amdVMWZyT1lsVU01SFZpSzNjSTI3Q3I0NkVueXpIcApLM3pXdXl5SjlNSGd4YU11cktaYTdiWTFLdUJvUjYrUFQrVHVBTWs0WEFieHR3ajRMN0JJV09xSXl6L0lXVlh1CnR4aFQwUFNiQ0xORzJaOFVmMmRMcWJUTkNPamJkbDg2SWdkMkUyQU4wVGR0T0grU1VIZ25LSWlrUkRaZFpqQmMKTTRYM3BnWWRBb0dCQVB0WEhDeWp3MVJuMVZEUzR3SmxnaWxvMkx5T3Zoby91bnFTTkpVLzM2L04wNFljeWlBRQpKOStqWGNoTVRrNlh5L3ZscWhiYUVFbGt6Y0Ziakd5WURtZ0RyZDQ1azUxVG5XeWNSOGt0L012SlRKQyt0OEY1CkhrbDZUdEtDVnRwMXRkdk1Wck1heFlUeEtiWVZFa1JRcmtZZE5NOHdoMmxKRHN2alZhU0JmdHBEQW9HQkFNNDcKSzU1VVRtNEVVaWhFdjdVdmo1MUI3K2tFV0J4aWhrblFlTmNHY3kxdE5CTVZrUFpZeUIreUpGQlZlUmhvTldodgp6UkloQUROV0JkU2ttMmwxT3UrWXJtRHkyUGFMVG5DbFdneGtqcGxFYW1uckRpZ2xUUEFZMjAzVXoxd0hza1YwCk5DVDh1TkExbm13L1d1S1JzaTh5alV4Z1FadXA4OTJQcTZpTlpSSUxBb0dCQU5oNXVKeUNGL1NVMGRlaEswQ2YKS1RKOVNWOXVHaklGZlptZ1g0WEUwK1paZ3J2YUJBVDdUMVNCSW44bnFiSnJiNWR2d2NXWTM3bzNCamhwbjFMUQp6R2V0ODR5WlRwaU1zZVFmM254YURZZC93MkJQdzNqeDFySTJrN2tmNWFxWEE2RmlKN0p2ZG53QU1vclBMMEtECnN2WlVwVzFtUU5yZnY4eS82cjJiZ3lSOUFvR0JBTVZ0S2JXWnViZGlsWkFOT0lWb2NaQzNOY2VackF3SVdjeEMKTWR5UFJTWEF2REJrM2ZDblQzWGdZUmt5cWdtdWdTcG9sU1g4eDZkZGU1bDB0MzYraGx3bWl4RXVpbnVESXVWOQp0ZlhWTWhvUnY5bUQ5M3FCTXVWS3RtOE5BakYxQUxteW85U2d4d0F6eEdTV1JoaE9LQWs4MkwyOERETE1BVFFpCnZNOVFCT2kvQW9HQkFMejZxM0RRZUhBdzg5WkkvN2dXK0RXcml2OGhMZVowUTBFU3VXVWtPSyt0c015WlZ3L2MKcGpFZ3F6aXVsWEwrWnI5SlpyVFdCR1pjWUNvUTRyNnZwTnhxVlBWUFhMTkE4cWdsRU44VFc1bTl2ZWV2cFpmagowTVlUVjJ1ZTROcU9rTTEvSjVNKzlXb3lraG5IUC84TDBOOCtVdDZBd3lVdXRhWkNWQThKb3l2MAotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= + public.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2akNDQWRLZ0F3SUJBZ0lDQm5vd0RRWUpLb1pJaHZjTkFRRUxCUUF3QURBZ0dBOHdNREF4TURFd01UQXcKTURBd01Gb1hEVE15TURFeU1ERXlOVGsxTlZvd0FEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQwpBUW9DZ2dFQkFNcDZNSkorVldUTHlzZklwZGRNU2dYMDE0NEZRdjRreVRQbGltZ0NCa1VoOGpUaEFKbmNOMDNkCmVCb3Z4cGd4N29lMlhjeEdzNXc4b0NYcGl1RkpGUWlSVUtKeGlJQmtuTWdzL0FPL3dpOGRjWTlhWTF3SWtJazcKU3dTaVNHZkNKVGhRMC80dEVGaFJGc1JsT3pQOW5XTDNqaXZ4cE9jcWd3N3hPeXloYWFiRVpvZVRLUVVhYW1UaAo5bmFDenpZMWhudjBNVTdLcWJMQWlSNk5kd01HSmVoc05KR3RuRTFvdkk5UWpQUlVXeE5lVWVzNEdCM0FqOHNtCm5INXV0YzhKTkdQV2ZkWkFwTFh2WDgxVTVld05BT05yY1hiL1RGdlQxaUpTekVHR0UrVjJLS1d1M1FKRlVwaUEKQzBKa0ZQN3dLUGZROGpHRzhDd3FuWHN4WjM4N0Z1RUNBd0VBQWFOc01Hb3dEZ1lEVlIwUEFRSC9CQVFEQWdlQQpNQjBHQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01DQmdnckJnRUZCUWNEQVRBTUJnTlZIUTRFQlFRREFRSURNQ3NHCkExVWRFUUVCL3dRaE1CK0NCVzFwYm1sdmh3Ui9BQUFCaHhBQUFBQUFBQUFBQUFBQUFBQUFBQUFCTUEwR0NTcUcKU0liM0RRRUJDd1VBQTRJQkFRQXhMSFdaclVpY0VXWFJKTHdUaW9mUTNRMUcrR3VDQ0UyQ0xHQm1uWGk3SlVTWAo3dlZRVDJESnMwWTFyallqWU5FbkJkci9UODZIaHJGdjhEU2R2WXljTnRONkJaNk1uL05Xc3A3YkRNZjJhSGF0CnFNQ045R0ZUVjRvSUV2TjAwdEpMVktjZ0hZYmpicGtLNVNoZnJ5RjJBWFA5eEtLNTJydmtiWWJSQXdFSHNLWFMKSzU2NWc4RTBIRnl1M3laYk5yTm1UZzJmYjhPRGdPbVJadVRjbGNhcGRuQXg0SVczRU1VemlRSk5vdHRkM2NUdQpQenFTMVcvUWF0YThCVWVlR1N3M2pGZEZlN1I3eGZhZHhLb1ZNYmNaN0FsemF2QnB4QzBaYldFWTRUY2szWUl4CnI0SjAyOWlqbnlaN004bnRPMkZ6SC9INitXWWZJQ2hLV2tHWjdhcHUKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +kind: Secret +metadata: + creationTimestamp: null + name: minio-tls-secret + namespace: open-cluster-management-observability diff --git a/examples/minio-tls/thanos-object-storage.yaml b/examples/minio-tls/thanos-object-storage.yaml new file mode 100644 index 000000000..ea4f53724 --- /dev/null +++ b/examples/minio-tls/thanos-object-storage.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +stringData: + thanos.yaml: | + type: s3 + config: + bucket: "thanos" + endpoint: "minio:9000" + insecure: false + access_key: "minio" + secret_key: "minio123" + http_config: + tls_config: + ca_file: /etc/minio/certs/ca.crt + cert_file: /etc/minio/certs/public.crt + key_file: /etc/minio/certs/private.key + insecure_skip_verify: false +kind: Secret +metadata: + name: thanos-object-storage + namespace: open-cluster-management-observability +type: Opaque diff --git a/examples/minio/minio-secret.yaml b/examples/minio/minio-secret.yaml index 584060d82..daed6e47a 100644 --- a/examples/minio/minio-secret.yaml +++ b/examples/minio/minio-secret.yaml @@ -1,6 +1,13 @@ apiVersion: v1 -data: - thanos.yaml: dHlwZTogczMKY29uZmlnOgogIGJ1Y2tldDogInRoYW5vcyIKICBlbmRwb2ludDogIm1pbmlvOjkwMDAiCiAgaW5zZWN1cmU6IHRydWUKICBhY2Nlc3Nfa2V5OiAibWluaW8iCiAgc2VjcmV0X2tleTogIm1pbmlvMTIzIg== +stringData: + thanos.yaml: | + type: s3 + config: + bucket: "thanos" + endpoint: "minio:9000" + insecure: true + access_key: "minio" + secret_key: "minio123" kind: Secret metadata: name: thanos-object-storage diff --git a/go.mod b/go.mod index fcfd5da77..542da2cf0 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/cloudflare/cfssl v1.6.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-kit/kit v0.11.0 + github.com/go-kit/log v0.1.0 github.com/go-logr/logr v0.4.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 @@ -15,7 +16,7 @@ require ( github.com/hashicorp/go-version v1.3.0 github.com/oklog/run v1.1.0 github.com/onsi/ginkgo v1.16.4 - github.com/onsi/gomega v1.15.0 + github.com/onsi/gomega v1.16.0 github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible 
github.com/openshift/client-go v0.0.0-20210916133943-9acee1a0fb83 github.com/openshift/cluster-monitoring-operator v0.1.1-0.20210611103744-7168290cd660 @@ -31,8 +32,8 @@ require ( github.com/spf13/cobra v1.2.1 github.com/spf13/pflag v1.0.5 github.com/stolostron/multicloud-operators-foundation v0.0.0-20220112005209-c7d642306a99 - github.com/stolostron/multiclusterhub-operator v0.0.0-20220111203209-4882a2b93f0f - github.com/stolostron/observatorium-operator v0.0.0-20220112075017-39be85036799 + github.com/stolostron/multiclusterhub-operator v0.0.0-20220106205009-2af6f43fd562 + github.com/stolostron/observatorium-operator v0.0.0-20220307015247-f9eb849e218e github.com/stretchr/testify v1.7.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.22.1 @@ -43,8 +44,8 @@ require ( k8s.io/kubectl v0.21.2 open-cluster-management.io/addon-framework v0.0.0-20211014025435-1f42884cdd53 open-cluster-management.io/api v0.0.0-20210916013819-2e58cdb938f9 - sigs.k8s.io/controller-runtime v0.9.7 - sigs.k8s.io/kube-storage-version-migrator v0.0.3 + sigs.k8s.io/controller-runtime v0.10.0 + sigs.k8s.io/kube-storage-version-migrator v0.0.4 sigs.k8s.io/kustomize/api v0.8.8 sigs.k8s.io/kustomize/v3 v3.3.1 sigs.k8s.io/yaml v1.2.0 @@ -67,17 +68,18 @@ require ( github.com/blang/semver v3.5.1+incompatible // indirect github.com/brancz/locutus v0.0.0-20210511124350-7a84f4d1bcb3 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/containerd/containerd v1.5.10 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect + github.com/docker/distribution v2.8.0+incompatible // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/efficientgo/tools/core v0.0.0-20210201224146-3d78f4d30648 // indirect github.com/emicklei/go-restful v2.14.2+incompatible // indirect github.com/evanphx/json-patch v4.11.0+incompatible // indirect - github.com/fatih/color v1.10.0 // indirect + github.com/fatih/color v1.12.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-errors/errors v1.0.1 // indirect - github.com/go-kit/log v0.1.0 // indirect github.com/go-logfmt/logfmt v0.5.0 // indirect github.com/go-logr/zapr v0.4.0 // indirect github.com/go-openapi/analysis v0.20.0 // indirect @@ -102,10 +104,9 @@ require ( github.com/hashicorp/consul/api v1.10.0 // indirect github.com/hashicorp/go-hclog v0.14.1 // indirect github.com/hashicorp/go-immutable-radix v1.3.0 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 // indirect + github.com/jmoiron/sqlx v1.3.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.11 // indirect @@ -118,7 +119,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect github.com/openshift/library-go v0.0.0-20210916194400-ae21aab32431 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -150,6 +151,7 @@ require ( gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // 
indirect google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -172,27 +174,22 @@ replace ( github.com/metal3-io/baremetal-operator => github.com/openshift/baremetal-operator v0.0.0-20200715132148-0f91f62a41fe github.com/metal3-io/cluster-api-provider-baremetal => github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d github.com/openshift/api => github.com/openshift/api v0.0.0-20210331193751-3acddb19d360 - github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20210802140536-4d8d83dcd464 github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.8.2-0.20210811141203-dcb07e8eac34 github.com/terraform-providers/terraform-provider-aws => github.com/openshift/terraform-provider-aws v1.60.1-0.20200630224953-76d1fb4e5699 github.com/terraform-providers/terraform-provider-azurerm => github.com/openshift/terraform-provider-azurerm v1.40.1-0.20200707062554-97ea089cc12a github.com/terraform-providers/terraform-provider-ignition/v2 => github.com/community-terraform-providers/terraform-provider-ignition/v2 v2.1.0 golang.org/x/text => golang.org/x/text v0.3.5 - k8s.io/api => k8s.io/api v0.21.3 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.2 - k8s.io/apimachinery => k8s.io/apimachinery v0.21.3 - k8s.io/client-go => k8s.io/client-go v0.21.0 + k8s.io/client-go => k8s.io/client-go v0.22.1 k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 kubevirt.io/client-go => kubevirt.io/client-go v0.29.0 - open-cluster-management.io/addon-framework => open-cluster-management.io/addon-framework v0.0.0-20210909134218-e6e993872bb1 // HiveConfig import dependancies - sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837 - sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b - sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35b2e38e + sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201022175424-d30c7a274820 + sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201016155852-4090a6970205 + sigs.k8s.io/cluster-api-provider-openstack => github.com/openshift/cluster-api-provider-openstack v0.0.0-20201116051540-155384b859c5 sigs.k8s.io/kube-storage-version-migrator => github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20210302135122-481bd04dbc78 ) // needed because otherwise installer fetches a library-go version that requires bitbucket.com/ww/goautoneg which is dead // Tagged version fetches github.com/munnerz/goautoneg instead -replace github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20200918101923-1e4c94603efe +replace github.com/openshift/library-go => github.com/openshift/library-go v0.0.0-20210916194400-ae21aab32431 diff --git a/go.sum b/go.sum index d4d2c0b1d..0411904a3 100644 --- a/go.sum +++ b/go.sum @@ -83,6 +83,7 @@ github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo 
github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v40.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v42.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v42.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v43.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v45.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -111,7 +112,6 @@ github.com/Azure/go-autorest/autorest v0.11.19 h1:7/IqD2fEYVha1EPeaiytVKhzmPV223 github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -164,6 +164,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= github.com/ChrisTrenkamp/goxpath v0.0.0-20190607011252-c5096ec8773d/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= @@ -173,6 +174,7 @@ github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/IBM/controller-filtered-cache v0.3.2/go.mod h1:gEDzSQxUwcdScwsw59MTwchTjh6vzLWaSPffIkr85U4= github.com/IBM/controller-filtered-cache v0.3.3 h1:B8INm/FDR5akkOBADNzXFdVFLf+8gXtVatcEP8yQvTM= github.com/IBM/controller-filtered-cache v0.3.3/go.mod h1:gEDzSQxUwcdScwsw59MTwchTjh6vzLWaSPffIkr85U4= github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= @@ -182,15 +184,20 @@ github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YH github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v1.0.0/go.mod 
h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU= github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= +github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.4.3/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -211,6 +218,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -223,6 +231,7 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -238,6 +247,7 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrU github.com/Unknwon/com 
v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= @@ -343,8 +353,19 @@ github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z github.com/aws/aws-sdk-go v1.40.10 h1:h+xUINuuH/9CwxE7O8mAuW7Aj9E5agfE9jQ3DrJsnA8= github.com/aws/aws-sdk-go v1.40.10/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= +github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= +github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= +github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= +github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= @@ -368,6 +389,7 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -418,11 +440,11 
@@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/centrify/cloud-golang-sdk v0.0.0-20190214225812-119110094d0f/go.mod h1:C0rtzmGXgN78pYR0tGJFhtHgkbAs0lIbHwkB81VxDQE= github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -432,6 +454,7 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q= github.com/chromedp/cdproto v0.0.0-20200116234248-4da64dd111ac/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= @@ -444,6 +467,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= @@ -498,19 +522,21 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA= github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= +github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= @@ -540,6 +566,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -562,10 +589,12 @@ github.com/coredns/coredns v1.1.2/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/ github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod 
h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= @@ -637,6 +666,7 @@ github.com/davegardnerisme/deephash v0.0.0-20210406090112-6d072427d830/go.mod h1 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= +github.com/deislabs/oras v0.11.1/go.mod h1:39lCtf8Q6WDC7ul9cnyWXONNzKvabEKk+AX+L0ImnQk= github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0= github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= @@ -664,17 +694,20 @@ github.com/dmacvicar/terraform-provider-libvirt v0.6.2/go.mod h1:rUzijwUJHukJWZK github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY= +github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -688,9 +721,9 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= @@ -718,6 +751,7 @@ github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -743,6 +777,7 @@ github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLw github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM= github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ= +github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -758,8 +793,9 @@ github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwo github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE= @@ -784,7 +820,6 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= -github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= @@ -795,7 +830,6 @@ github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYis github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -817,8 +851,12 @@ github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= +github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -847,7 +885,6 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= -github.com/go-logr/zapr v0.3.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= @@ -911,6 +948,7 @@ github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= @@ -993,6 +1031,7 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gocql/gocql v0.0.0-20200121121104-95d072f1b5bb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= @@ -1003,6 +1042,7 @@ github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblf github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= @@ -1012,6 +1052,7 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0/go.mod 
h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -1036,7 +1077,6 @@ github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191002201903-404acd9df4cc/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1053,7 +1093,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1140,7 +1179,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-jsonnet v0.17.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= github.com/google/go-metrics-stackdriver v0.0.0-20190816035513-b52628e82e2a/go.mod h1:o93WzqysX0jP/10Y13hfL6aq9RoUvGaVdkrH5awMksE= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -1208,7 +1247,6 @@ github.com/gophercloud/gophercloud v0.6.1-0.20191025185032-6ad562af8c1f/go.mod h github.com/gophercloud/gophercloud v0.6.1-0.20191122030953-d8ac278c1c9d/go.mod h1:ozGNgr9KYOVATV5jsgHl/ceCDXGuguqOZAzoQ/2vcNM= github.com/gophercloud/gophercloud v0.10.1-0.20200424014253-c3bfe50899e5/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.12.1-0.20200821143728-362eb785d617/go.mod 
h1:w2NJEd88d4igNL1KUHzBsKMvS/ByJTzgltTGWKT7AC8= github.com/gophercloud/gophercloud v0.12.1-0.20200827191144-bb4781e9de45/go.mod h1:w2NJEd88d4igNL1KUHzBsKMvS/ByJTzgltTGWKT7AC8= github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gophercloud/gophercloud v0.19.0 h1:zzaIh8W2K5M4AkJhPV1z6O4Sp0FOObzXm61NUmFz3Kw= @@ -1217,8 +1255,6 @@ github.com/gophercloud/utils v0.0.0-20190124231947-9c3b9f2457ef/go.mod h1:wjDF8z github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= github.com/gophercloud/utils v0.0.0-20190313033024-0bcc8e728cb5/go.mod h1:SZ9FTKibIotDtCrxAU/evccoyu1yhKST6hgBvwTB5Eg= github.com/gophercloud/utils v0.0.0-20200423144003-7c72efc7435d/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= -github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= -github.com/gophercloud/utils v0.0.0-20200918191848-da0e919a012a/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gophercloud/utils v0.0.0-20201101202656-8677e053dcf1/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gophercloud/utils v0.0.0-20201212031956-9dc30e126fea/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= github.com/gophercloud/utils v0.0.0-20210113034859-6f548432055a/go.mod h1:ehWUbLQJPqS0Ep+CxeD559hsm9pthPXadJNKwZkp43w= @@ -1271,6 +1307,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= +github.com/grpc-ecosystem/grpc-health-probe v0.3.2/go.mod h1:izVOQ4RWbjUR6lm4nn+VLJyQ+FyaiGmprEYgI04Gs7U= github.com/h2non/filetype v1.0.12/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/aws-sdk-go-base v0.4.0/go.mod h1:eRhlz3c4nhqxFZJAahJEFL7gh6Jyj5rQmQc7F9eHFyQ= @@ -1511,6 +1548,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= @@ -1572,8 +1610,9 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 
h1:lrdPtrORjGv1HbbEvKWDUAy97mPpFm4B8hp77tcCUJY= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE= +github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/johannesboyne/gofakes3 v0.0.0-20200218152459-de0855a40bc1/go.mod h1:fNiSoOiEI5KlkWXn26OwKnNe58ilTIkpBlgOrt7Olu8= @@ -1592,6 +1631,7 @@ github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+ github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1620,9 +1660,9 @@ github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0L github.com/katbyte/terrafmt v0.2.1-0.20200303174203-e6a3e82cb21b/go.mod h1:WRq5tDmK04tcYbEr400zAUWtOK0jix54e8YeHP3IoQg= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -github.com/keybase/go-crypto v0.0.0-20190828182435-a05457805304/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -1652,6 +1692,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -1678,6 +1719,7 @@ github.com/lib/pq v1.0.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libvirt/libvirt-go v4.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= github.com/libvirt/libvirt-go v5.0.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= github.com/libvirt/libvirt-go v5.10.0+incompatible/go.mod h1:34zsnB4iGeOv7Byj6qotuW8Ya4v4Tr43ttjz/F0wjLE= @@ -1735,8 +1777,8 @@ github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88/go.mod h1:a2HXwefe github.com/matoous/godox v0.0.0-20190910121045-032ad8106c86/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matoous/godox v0.0.0-20200801072554-4fb83dc2941e/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8= github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= @@ -1760,6 +1802,7 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -1770,10 +1813,12 @@ github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= 
github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= @@ -1808,6 +1853,7 @@ github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= +github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= @@ -1823,6 +1869,7 @@ github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXx github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.1.1/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1920,7 +1967,6 @@ github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nirarg/terraform-provider-kubevirt v0.0.0-20201222125919-101cee051ed3/go.mod h1:FMugN9a6XOJm9mLFEV/+F4IJzdZmpLn/OaNRa8S/Ens= github.com/nishanths/exhaustive v0.0.0-20200811152831-6cf413ae40e0/go.mod h1:wBEpHwM2OdmeNpdCvRPUlkEbBuaFmcK4Wv8Q7FuGW3c= @@ -1943,7 +1989,6 @@ github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2f github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1960,7 +2005,7 @@ github.com/onsi/ginkgo v1.16.4 
h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1972,15 +2017,21 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-cluster-management/api v0.0.0-20210511122802-f38973154cbd/go.mod h1:ot+A1DWq+v1IV+e1S7nhIteYAmNByFgtazvzpoeAfRQ= -github.com/open-cluster-management/multicloud-operators-channel v1.0.1-0.20200604182604-841a36e63aa9/go.mod h1:IIME1guAHnyD0E6/Z6H8UjPflJvdAkNUoAw7cuTkbaY= -github.com/open-cluster-management/multicloud-operators-deployable v0.0.0-20200603180154-d1d17d718c30/go.mod h1:ysimCqQtXj9F+LPHKRZoFkN/d/pXYfZTsF5bqkdEaNs= -github.com/open-cluster-management/multicloud-operators-placementrule v1.0.1-2020-05-28-18-29-00.0.20200603172904-efde26079087/go.mod h1:871ea21VnKsCByS6u8fHXXTW94PlIWdQKw5bf/amBr8= -github.com/open-cluster-management/multicloud-operators-subscription v1.0.0-2020-05-12-21-17-19.0.20200610014526-1e0e8c0acfad/go.mod h1:CFx6SlVQtyvlYUK4KEBCAXXVwEk2tTBuJsyb+LfU89g= -github.com/open-cluster-management/multicloud-operators-subscription-release v1.0.1-2020-05-28-18-29-00.0.20200603160156-4d66bd136ba3/go.mod h1:UTzlywmLkZVU8kckKskqmXhAO/JB0OXQHk1+DvEDBeM= +github.com/open-cluster-management/api v0.0.0-20200903203421-64b667f5455c/go.mod h1:F1hDJHtWuV7BAUtfL4XRS9GZjUpksleLgEcisNXvQEw= +github.com/open-cluster-management/api v0.0.0-20210513122330-d76f10481f05/go.mod h1:ot+A1DWq+v1IV+e1S7nhIteYAmNByFgtazvzpoeAfRQ= +github.com/open-cluster-management/backplane-operator v1.0.0-2021-10-26-20-16-14/go.mod h1:ElzcRdjph1j8G59QMzO17Kfr99Pew0DP+TdCuxGiB2Y= +github.com/open-cluster-management/klusterlet-addon-controller v0.0.0-20210303215539-1d12cebe6f19/go.mod h1:YWcjLe+zqmdqPFvwzASizCa8JdXe5briBgxoP+r3CRM= +github.com/open-cluster-management/library-e2e-go v0.0.0-20200620112055-c80fc3c14997/go.mod h1:glvUOJg5EAb1Lq0OPcYhl57PJTxN2T5xuSfdV3Iab4Y= +github.com/open-cluster-management/library-go v0.0.0-20200828173847-299c21e6c3fc/go.mod h1:X9KdKajEnrx6+LQSjo+61gLdk4Ntmwp+YEYD0CIJT7I= +github.com/open-cluster-management/multicloud-operators-channel v1.2.4-0-20210817-0d8714a/go.mod 
h1:j8VcKJTJ6FOs5I17+anmcopgDuZYUua9vJDQ6hQVcMM= +github.com/open-cluster-management/multicloud-operators-deployable v1.2.4-0-20210816-f9fe854/go.mod h1:lJoxNuBDqFO6TUj4xFLbep2TL1O3YQVGAGBRAt2Ah0g= +github.com/open-cluster-management/multicloud-operators-placementrule v1.2.4-0-20210816-699e5/go.mod h1:qB8YBquVYtr/h/U7DTGL+GwXPxuIinX4OpLW3s86xGY= +github.com/open-cluster-management/multicloud-operators-subscription v1.2.4-0-20210915-3eda062.0.20210923221443-91919cf42424/go.mod h1:uPX0peAbYWJzV7sYomzwPzUGLFzlQtOSOc+2lUxWlgE= +github.com/open-cluster-management/multicloud-operators-subscription-release v1.2.4-0-20210823-a494067/go.mod h1:WJpyiqWneSC3a11lDe7hB8UxY8HUw/K/Pv1cW9X4ogQ= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1989,24 +2040,25 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools 
v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/openshift-metal3/terraform-provider-ironic v0.2.3/go.mod h1:ux2W6gsCIYsY/fX5N0V0ZgwFEBNN7P8g6RlH6ACi97k= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/openshift-metal3/terraform-provider-ironic v0.2.4/go.mod h1:ux2W6gsCIYsY/fX5N0V0ZgwFEBNN7P8g6RlH6ACi97k= github.com/openshift/api v0.0.0-20210331193751-3acddb19d360 h1:EGWKZ4foeELg9R+0OaLXKUoqHmtUwAMq0fCBUirbKwY= github.com/openshift/api v0.0.0-20210331193751-3acddb19d360/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= @@ -2015,19 +2067,23 @@ github.com/openshift/build-machinery-go v0.0.0-20200713135615-1f43d26dccc7/go.mo github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20210115170933-e575b44a7a94/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/build-machinery-go v0.0.0-20210423112049-9415d7ebd33e/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49 h1:7NmjUkJtGHpMTE/n8ia6itbCdZ7eYuTCXKc/zsA7OSM= +github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= +github.com/openshift/client-go v0.0.0-20191125132246-f6563a70e19a/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= +github.com/openshift/client-go v0.0.0-20200326155132-2a6cd50aedd0/go.mod h1:uUQ4LClRO+fg5MF/P6QxjMCb1C9f7Oh4RKepftDnEJE= +github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5/go.mod h1:5rGmrkQ8DJEUXA+AR3rEjfH+HFyg4/apY9iCQFgvPfE= +github.com/openshift/client-go v0.0.0-20201020074620-f8fd44879f7c/go.mod h1:yZ3u8vgWC19I9gbDMRk8//9JwG/0Sth6v7C+m6R8HXs= github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8= +github.com/openshift/client-go v0.0.0-20210916133943-9acee1a0fb83 h1:TGBy40xVBCqDqvu8gaakva4u+08JtOt/LfekiwbCMyc= +github.com/openshift/client-go v0.0.0-20210916133943-9acee1a0fb83/go.mod h1:iSeqKIqUKxVec3gV1kNvwS1tjDpzpdP134RimkLc3BE= github.com/openshift/cloud-credential-operator v0.0.0-20200316201045-d10080b52c9e/go.mod h1:iPn+uhIe7nkP5BMHe2QnbLtg5m/AIQ1xvz9s3cig5ss= github.com/openshift/cluster-api v0.0.0-20190805113604-f8de78af80fc/go.mod h1:mNsD1dsD4T57kV4/C6zTHke/Ro166xgnyyRZqkamiEU= github.com/openshift/cluster-api v0.0.0-20191030113141-9a3a7bbe9258/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= github.com/openshift/cluster-api v0.0.0-20191129101638-b09907ac6668/go.mod h1:T18COkr6nLh9RyZKPMP7YjnwBME7RX8P2ar1SQbBltM= github.com/openshift/cluster-api-actuator-pkg v0.0.0-20190614215203-42228d06a2ca/go.mod h1:KNPaA64x3Ok7z538kvS2acwC5fEwvPfF0RdTx2geQEE= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20200506073438-9d49428ff837/go.mod h1:aXOt4gMtzXQxymPRm98vJAVmGjDhcTXsrQHauiNJK3o= -github.com/openshift/cluster-api-provider-azure 
v0.1.0-alpha.3.0.20200120114645-8a9592f1f87b/go.mod h1:LPNjFna6F+ePHaXM/7QIyCF0sLsEtfuN16yY9sFZJ40= +github.com/openshift/cluster-api-provider-aws v0.2.1-0.20201022175424-d30c7a274820/go.mod h1:rDwmh/vpz6mUU/l9QLWeaoGpUeC+b3yyI34xnp3tIf8= +github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20201016155852-4090a6970205/go.mod h1:oOG/TNSBse4brosfLCH/G2Q/42ye+DZQq8VslA5SxOs= github.com/openshift/cluster-api-provider-baremetal v0.0.0-20190821174549-a2a477909c1d/go.mod h1:S+wtA0Rm2FZ5ccC9zNQXUWUDesR6Jsdn5eb6HjAR+Gs= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200120152131-1b09fd9e7156/go.mod h1:KCyjaBfEkifs9bqV1HEXDJUyQylgeLSqiqt2QnMn7is= github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200701112720-3a7d727c9a10/go.mod h1:wgkZrOlcIMWTzo8khB4Js2PoDJDlIUUdzCBm7BuDdqw= github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200713133651-5c8a640669ac/go.mod h1:XVYX9JE339nKbDDa/W481XD+1GTeqeaBm8bDPr7WE7I= github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200901173901-9056dbc8c9b9/go.mod h1:rcwAydGZX+z4l91wtOdbq+fqDwuo6iu0YuFik3UUc+8= @@ -2036,28 +2092,22 @@ github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201027164920-70f2f92e64 github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201203141909-4dc702fd57a5/go.mod h1:/XjFaKnqBc8K/jcRXHO7tau39CmzNinqmpxYaQGRvnE= github.com/openshift/cluster-api-provider-kubevirt v0.0.0-20201214114543-e5aed9c73f1f/go.mod h1:Moiq8vUJ4IdTaJBxIA756FFJ4GgVXZAiOds7lTpZ1kQ= github.com/openshift/cluster-api-provider-libvirt v0.2.1-0.20191219173431-2336783d4603/go.mod h1:7pQ9Bzha+ug/5zd+0ufbDEcnn2OnNlPwRwYrzhXk4NM= -github.com/openshift/cluster-api-provider-openstack v0.0.0-20200526112135-319a35b2e38e/go.mod h1:1DDDZ7uXsauiUvCDUxq6XmsToaTh9WipPoW1qASkL9c= +github.com/openshift/cluster-api-provider-openstack v0.0.0-20201116051540-155384b859c5/go.mod h1:vhv3G5oWIev2paJytOa57cXUYdzEyVgoUMwU3uuOB7Y= github.com/openshift/cluster-api-provider-ovirt v0.1.1-0.20200504092944-27473ea1ae43/go.mod h1:Vl/bvZulLw6PdUADIFWGfoTWH1O4L1B80eN7BtLYEuo= github.com/openshift/cluster-autoscaler-operator v0.0.0-20190521201101-62768a6ba480/go.mod h1:/XmV44Fh28Vo3Ye93qFrxAbcFJ/Uy+7LPD+jGjmfJYc= github.com/openshift/cluster-monitoring-operator v0.1.1-0.20210611103744-7168290cd660 h1:Uu4FRbRt8SvN85H+HXshPcDYepPnVp4Ju9VZZFxcDhU= github.com/openshift/cluster-monitoring-operator v0.1.1-0.20210611103744-7168290cd660/go.mod h1:0/kDYY2vkaFz/O1ZpFyzMKCULRIt5Bg9SBN2+iYZ2qs= -github.com/openshift/cluster-version-operator v3.11.1-0.20190629164025-08cac1c02538+incompatible/go.mod h1:0BbpR1mrN0F2ZRae5N1XHcytmkvVPaeKgSQwRRBWugc= github.com/openshift/custom-resource-status v0.0.0-20190822192428-e62f2f3b79f3/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= github.com/openshift/generic-admission-server v1.14.1-0.20200903115324-4ddcdd976480/go.mod h1:OAHL5WnZphlhVEf5fTdeGLvNwMu1B2zCWpmxJpCA35o= -github.com/openshift/hive v1.0.18-0.20210129211840-21bce609f1f4/go.mod h1:Hv5v6eoEY4SrYZE2TWA7euqY5ldyxg+YH6pR9RzUv8I= github.com/openshift/hive v1.1.16/go.mod h1:QJY97wHcEv7LTCB5tStmo9JT6E2LHSF8m73fFVz/Aj8= github.com/openshift/hive/apis v0.0.0-20210802140536-4d8d83dcd464/go.mod h1:77ODrnaHiDlfbqQgvk5nUWuqf2AsGY/99QlfNTiqHwI= -github.com/openshift/installer v0.9.0-master.0.20201103204150-888dc5bab60c/go.mod h1:FsTPqP4aUu5/bcxbtRyWFF7n/YXR/d/VRe7+hTNldGA= github.com/openshift/installer v0.9.0-master.0.20210211002944-d237b9dee575/go.mod 
h1:SWnBsRHNJNZCUJUMyyWbxljcePsR/hCUEY38GwhSIA8= github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20210302135122-481bd04dbc78 h1:BxSKP/SI7A42pPCJfOM1pj5uU5A8L5mCz3Vyer2l88U= github.com/openshift/kubernetes-kube-storage-version-migrator v0.0.3-0.20210302135122-481bd04dbc78/go.mod h1:Ld7NVItdAwI99lCHbys/n88rIMQjeS7PyXA4NL4yImM= -github.com/openshift/library-go v0.0.0-20200918101923-1e4c94603efe h1:MJqGN0NVONnTLDYPVIEH4uo6i3gAK7LAkhLnyFO8Je0= -github.com/openshift/library-go v0.0.0-20200918101923-1e4c94603efe/go.mod h1:NI6xOQGuTnLXeHW8Z2glKSFhF7X+YxlAlqlBMaK0zEM= +github.com/openshift/library-go v0.0.0-20210916194400-ae21aab32431 h1:Fdg8exEe5DWwY3xmOxXy42L8K0N0nQA/q/i922kdD5M= +github.com/openshift/library-go v0.0.0-20210916194400-ae21aab32431/go.mod h1:FS2LWYy/Hf4+2qfMZnh1Mt7EoOkc79x58xg02sidhn8= github.com/openshift/machine-api-operator v0.0.0-20190312153711-9650e16c9880/go.mod h1:7HeAh0v04zQn1L+4ItUjvpBQYsm2Nf81WaZLiXTcnkc= -github.com/openshift/machine-api-operator v0.2.1-0.20191128180243-986b771e661d/go.mod h1:9qQPF00anuIsc6RiHYfHE0+cZZImbvFNLln0NRBVVMg= -github.com/openshift/machine-api-operator v0.2.1-0.20200402110321-4f3602b96da3/go.mod h1:46g2eLjzAcaNURYDvhGu0GhyjKzOlCPqixEo68lFBLs= -github.com/openshift/machine-api-operator v0.2.1-0.20200429102619-d36974451290/go.mod h1:QkhH+/6BXabl+4HmiLwx9/bmW1ieCGF9km7xz22Ozl0= github.com/openshift/machine-api-operator v0.2.1-0.20200611014855-9a69f85c32dd/go.mod h1:6vMi+R3xqznBdq5rgeal9N3ak3sOpy50t0fdRCcQXjE= github.com/openshift/machine-api-operator v0.2.1-0.20200701225707-950912b03628/go.mod h1:cxjy/RUzv5C2T5FNl1KKXUgtakWsezWQ642B/CD9VQA= github.com/openshift/machine-api-operator v0.2.1-0.20200722104429-f4f9b84df9b7/go.mod h1:XDsNRAVEJtkI00e51SAZ/PnqNJl1zv0rHXSdl9L1oOY= @@ -2087,31 +2137,32 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/operator-framework/api v0.1.1/go.mod h1:yzNYR7qyJqRGOOp+bT6Z/iYSbSPNxeh3Si93Gx/3OBY= -github.com/operator-framework/api v0.3.4/go.mod h1:TmRmw+8XOUaDPq6SP9gA8cIexNf/Pq8LMFY7YaKQFTs= -github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/api v0.3.7-0.20200602203552-431198de9fc2/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/api v0.3.13/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/api v0.10.0/go.mod h1:tV0BUNvly7szq28ZPBXhjp1Sqg5yHCOeX19ui9K4vjI= +github.com/operator-framework/operator-lib v0.5.0/go.mod h1:33Skl0vjauYx3nAS+cSFbHNkX8do7weQ6s5siIV/w1E= github.com/operator-framework/operator-lifecycle-manager v0.0.0-20200321030439-57b580e57e88/go.mod h1:7Ut8p9jJ8C6RZyyhZfZypmlibCIJwK5Wcc+WZDgLkOA= github.com/operator-framework/operator-registry v1.5.3/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= github.com/operator-framework/operator-registry v1.6.1/go.mod h1:sx4wWMiZtYhlUiaKscg3QQUPPM/c1bkrAs4n4KipDb4= github.com/operator-framework/operator-registry v1.6.2-0.20200330184612-11867930adb5/go.mod h1:SHff373z8asEkPo6aWpN0qId4Y/feQTjZxRF8PRhti8= -github.com/operator-framework/operator-registry v1.12.1/go.mod h1:rf4b/h77GUv1+geiej2KzGRQr8iBLF4dXNwr5AuGkrQ= -github.com/operator-framework/operator-registry v1.12.4/go.mod h1:JChIivJVLE1wRbgIhDFzYQYT9yosa2wd6qiTyMuG5mg= 
+github.com/operator-framework/operator-registry v1.13.4/go.mod h1:YhnIzOVjRU2ZwZtzt+fjcjW8ujJaSFynBEu7QVKaSdU= github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b/go.mod h1:iVyukRkam5JZa8AnjYf+/G3rk7JI1+M6GsU0sq0B9NA= github.com/operator-framework/operator-sdk v0.17.0/go.mod h1:wmYi08aoUmtgfoUamURmssI4dkdFGNtSI1Egj+ZfBnk= -github.com/operator-framework/operator-sdk v0.18.0/go.mod h1:xP/DNvnYnIoGK1bLKiD0s7aYZp2fa4AI6t1v3INaoZg= +github.com/operator-framework/operator-sdk v0.19.4/go.mod h1:+gIlE/CfBGFGj51qJ2sLTPZWE1X27cKtjZ0m5vwY8Hw= github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= github.com/ory/dockertest v3.3.4+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc= github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw= github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/ovirt/go-ovirt v0.0.0-20200313072907-d30f754823a6/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/ovirt/go-ovirt v0.0.0-20200428093010-9bcc4fd4e6c0/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= -github.com/ovirt/go-ovirt v0.0.0-20200613023950-320a86f9df27/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= github.com/ovirt/go-ovirt v0.0.0-20210112072624-e4d3b104de71/go.mod h1:fLDxPk1Sf64DBYtwIYxrnx3gPZ1q0xPdWdI1y9vxUaw= -github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20200914080915-c4444fb5c201/go.mod h1:XFDLN/srNA1s2Dq+gp4zBvql6nRnfNJzDGzI5vtK85g= github.com/ovirt/terraform-provider-ovirt v0.4.3-0.20210118101701-cc657a8c6634/go.mod h1:LDHfgu36xGyr0tUPZpL+a7HRovpRzlcNiu0CmPcxcUI= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= @@ -2141,7 +2192,6 @@ github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= @@ -2295,6 +2345,7 @@ github.com/rs/xid v1.2.1/go.mod 
h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.4.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc= +github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -2341,6 +2392,7 @@ github.com/shirou/gopsutil v2.19.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu github.com/shirou/gopsutil/v3 v3.20.10/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -2445,14 +2497,13 @@ github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jW github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.0.2/go.mod h1:eLfe5bL3qbL7ep/KafHzthxejrOF5J3xmt03uL5tzek= -github.com/stoewer/go-strcase v1.1.0/go.mod h1:G7YglbHPK5jX3JcWljxVXRXPh90/dtxfy6xWqxu5b90= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stolostron/multicloud-operators-foundation v0.0.0-20220112005209-c7d642306a99 h1:VKw5JiV28Q1kB4GH34zzJvLGR4qQQt+XT8EYVTULSjQ= github.com/stolostron/multicloud-operators-foundation v0.0.0-20220112005209-c7d642306a99/go.mod h1:OGgYyIj05jrIlLYF+oczqidvHLKPEdum/fN0aLBjPhQ= -github.com/stolostron/multiclusterhub-operator v0.0.0-20220111203209-4882a2b93f0f h1:HJ286o7ylkYi3vhycfxz17yZQ/GZtCbihaZogGavDbo= -github.com/stolostron/multiclusterhub-operator v0.0.0-20220111203209-4882a2b93f0f/go.mod h1:iKow3tj3j8B4al2v4agj/ePBVVh8phRrPDj/MkWqT9c= -github.com/stolostron/observatorium-operator v0.0.0-20220112075017-39be85036799 h1:eQz6z2yAT4guRBUxwHaYxSeWB2yFhMgLHOHPjZih2rs= -github.com/stolostron/observatorium-operator v0.0.0-20220112075017-39be85036799/go.mod h1:3TfJn5Ot5u8q2aSag1tw3vlqlM6U4Gut3ktydX1ZoBQ= +github.com/stolostron/multiclusterhub-operator v0.0.0-20220106205009-2af6f43fd562 h1:ifA576QBRoYFKuym91i5s6orpG1w+G7csDoMxn4FmHE= +github.com/stolostron/multiclusterhub-operator v0.0.0-20220106205009-2af6f43fd562/go.mod h1:bnUDg8gb0adzST1gzFiivscUlXtE3tYbMtoHsW6DHaY= +github.com/stolostron/observatorium-operator v0.0.0-20220307015247-f9eb849e218e h1:tL8342/03B7eZ/CPKkpyHHd+Sr6wyJ4RyXus8sr9mNs= +github.com/stolostron/observatorium-operator 
v0.0.0-20220307015247-f9eb849e218e/go.mod h1:3TfJn5Ot5u8q2aSag1tw3vlqlM6U4Gut3ktydX1ZoBQ= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -2471,7 +2522,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -2488,7 +2538,6 @@ github.com/tencentcloud/tencentcloud-sdk-go v3.0.82+incompatible/go.mod h1:0PfYo github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20190808065407-f07404cefc8c/go.mod h1:wk2XFUg6egk4tSDNZtXeKfe2G6690UVyt163PuUxBZk= github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= -github.com/terraform-provider-openstack/terraform-provider-openstack v1.32.0/go.mod h1:Xm/accuOkyS8NkNp9HwqNMPu5rAFnHP7g/2uNRZbO8c= github.com/terraform-provider-openstack/terraform-provider-openstack v1.33.0/go.mod h1:NA2Iaq+p8yIzeHAY9DHEedL/SqrT0AInYP9GTqVLe1k= github.com/terraform-providers/terraform-provider-azuread v0.9.0/go.mod h1:sSDzB/8CD639+yWo5lZf+NJvGSYQBSS6z+GoET9IrzE= github.com/terraform-providers/terraform-provider-google v1.20.1-0.20200623174414-27107f2ee160/go.mod h1:QxehqxV8Swl+O2JXJUdS6orHYJXWUEr4HFfYH5JV9ew= @@ -2574,7 +2623,6 @@ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= @@ -2603,6 +2651,7 @@ github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= @@ -2610,9 +2659,11 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -2803,6 +2854,7 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -2853,7 +2905,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2871,6 +2922,7 @@ golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2898,6 +2950,7 @@ golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2932,6 +2985,7 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= @@ -2941,7 +2995,6 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2982,7 +3035,6 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -3050,10 +3102,10 @@ golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191020212454-3e7259c5e7c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191110163157-d32e6e3b99c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3129,6 +3181,7 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3147,7 +3200,6 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3269,6 +3321,7 @@ golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200701041122-1837592efa10/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200713235242-6acd2ab80ede/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -3311,7 +3364,6 @@ gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEI gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.0.0-20190915125329-975d99cd20a9/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -3367,7 +3419,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.4/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -3394,6 +3445,7 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -3422,6 +3474,7 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= 
google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200721032028-5044d0edf986/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -3491,6 +3544,7 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v0.0.0-20200709232328-d8193ee9cc3e/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3514,8 +3568,9 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -3524,6 +3579,8 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBl gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -3550,8 +3607,8 @@ gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQb gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -3576,7 +3633,8 @@ gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= helm.sh/helm/v3 v3.1.0/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= -helm.sh/helm/v3 v3.2.0/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= +helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= +helm.sh/helm/v3 v3.6.3/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -3588,26 +3646,133 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= +k8s.io/api v0.0.0-20181115043458-b799cb063522/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/api v0.0.0-20190313235455-40a48860b5ab/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/api v0.0.0-20190409092523-d687e77c8ae9/go.mod h1:FQEUn50aaytlU65qqBn/w+5ugllHwrBzKm7DzbnXdzE= +k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= +k8s.io/api v0.0.0-20190725062911-6607c48751ae/go.mod h1:1O0xzX/RAtnm7l+5VEUxZ1ysO2ghatfq/OZED4zM9kA= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= +k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= +k8s.io/api v0.0.0-20191122220107-b5267f2975e0/go.mod h1:vYpRfxYkMrmPPSesoHEkGNHxNKTk96REAwqm/inQbs0= +k8s.io/api v0.16.4/go.mod h1:AtzMnsR45tccQss5q8RnF+W8L81DH6XwXwo/joEx9u0= +k8s.io/api v0.16.7/go.mod h1:oUAiGRgo4t+5yqcxjOu5LoHT3wJ8JSbgczkaFYS5L7I= +k8s.io/api v0.16.9/go.mod h1:Y7dZNHs1Xy0mSwSlzL9QShi6qkljnN41yR8oWCRTDe8= +k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= +k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= +k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= +k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= 
+k8s.io/api v0.18.0-rc.1/go.mod h1:ZOh6SbHjOYyaMLlWmB2+UOQKEWDpCnVEVpEyt7S2J9s= +k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= +k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= +k8s.io/api v0.19.1/go.mod h1:+u/k4/K/7vp4vsfdT7dyl8Oxk1F26Md4g5F26Tu85PU= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.5/go.mod h1:FQjAceXnVaWDeov2YUWhOb6Yt+5UjErkp6UO3nczO1Y= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.21.0-rc.0/go.mod h1:Dkc/ZauWJrgZhjOjeBgW89xZQiTBJA2RaBKYHXPsi2Y= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= -k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= +k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= +k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= +k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= +k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= +k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto= +k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzwUvBlpg8iswe0= +k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= +k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= +k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY= +k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI= +k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= +k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= +k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= +k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= +k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= +k8s.io/apiextensions-apiserver v0.19.4/go.mod h1:B9rpH/nu4JBCtuUp3zTTk8DEjZUupZTBEec7/2zNRYw= +k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= +k8s.io/apiextensions-apiserver v0.20.2/go.mod h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs= +k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= +k8s.io/apiextensions-apiserver v0.21.1/go.mod 
h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= -k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= +k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= +k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= +k8s.io/apiextensions-apiserver v0.22.1 h1:YSJYzlFNFSfUle+yeEXX0lSQyLEoxoPJySRupepb0gE= +k8s.io/apiextensions-apiserver v0.22.1/go.mod h1:HeGmorjtRmRLE+Q8dJu6AYRoZccvCMsghwS8XTUYb2c= +k8s.io/apimachinery v0.0.0-20181110190943-2a7c93004028/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apimachinery v0.0.0-20190409092423-760d1845f48b/go.mod h1:FW86P8YXVLsbuplGMZeb20J3jYHscrDqw4jELaFJvRU= +k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= +k8s.io/apimachinery v0.0.0-20190719140911-bfcf53abc9f8/go.mod h1:sBJWIJZfxLhp7mRsRyuAE/NfKTr3kXGR1iaqg8O0gJo= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= +k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= +k8s.io/apimachinery v0.0.0-20191121175448-79c2a76c473a/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.16.4/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= +k8s.io/apimachinery v0.16.7/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= +k8s.io/apimachinery v0.16.9/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= +k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= +k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= +k8s.io/apimachinery v0.18.0-rc.1/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.1/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.5/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.21.0-rc.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery 
v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/apiserver v0.0.0-20191122221311-9d521947b1e1/go.mod h1:RbsZY5zzBIWnz4KbctZsTVjwIuOpTp4Z8oCgFHN4kZQ= +k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc= +k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw= +k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= +k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= -k8s.io/apiserver v0.18.0-rc.1/go.mod h1:RYE9w2Lijk1aWW3i3pS7kFGU0Afof+UDoOz1qW9aSYg= +k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= +k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= +k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= +k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= +k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= k8s.io/apiserver v0.19.1/go.mod h1:iRxYIjA0X2XEyoW8KslN4gDhasfH4bWcjj6ckVeZX28= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/apiserver v0.19.4/go.mod h1:X8WRHCR1UGZDd7HpV0QDc1h/6VbbpAeAGyxSh8yzZXw= k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= +k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= +k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= k8s.io/apiserver v0.22.1 h1:Ul9Iv8OMB2s45h2tl5XWPpAZo1VPIJ/6N+MESeed7L8= k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400= k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= @@ -3620,50 +3785,73 @@ k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= k8s.io/cli-runtime v0.19.1/go.mod h1:X6g8e4NBiG8GMsKewXsRpo36MO6xrvXa+0wCg7zO4aU= k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s= +k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= k8s.io/cli-runtime v0.21.2/go.mod 
h1:8u/jFcM0QpoI28f6sfrAAIslLCXUYKD5SsPPMWiHYrI= -k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/cli-runtime v0.21.3/go.mod h1:h65y0uXIXDnNjd5J+F3CvQU3ZNplH4+rjqbII7JkD4A= +k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= +k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/cluster-bootstrap v0.0.0-20190202014938-c9acc0c1bea2/go.mod h1:iBSm2nwo3OaiuW8VDvc3ySDXK5SKfUrxwPvBloKG7zg= k8s.io/cluster-registry v0.0.6/go.mod h1:/F+o1rvzjBdLbg782rR8eKrOb20hPy7us+Zu/pjBtAY= k8s.io/code-generator v0.0.0-20181114232248-ae218e241252/go.mod h1:IPqxl/YHk05nodzupwjke6ctMjyNRdV2zZ5/j3/F204= k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb/go.mod h1:cDx5jQmWH25Ff74daM7NVYty9JWw9dvIS9zT9eIubCY= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= k8s.io/code-generator v0.0.0-20191003035328-700b1226c0bd/go.mod h1:HC9p4y3SBN+txSs8x57qmNPXFZ/CxdCHiDTNnocCSEw= +k8s.io/code-generator v0.16.4/go.mod h1:mJUgkl06XV4kstAnLHAIzJPVCOzVR+ZcfPIv4fUsFCY= +k8s.io/code-generator v0.16.7/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= +k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/code-generator v0.17.4/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/code-generator v0.18.0-rc.1/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.1/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.4/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/code-generator v0.20.2/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= k8s.io/code-generator v0.21.0-rc.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= +k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= +k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.0.0-20191122220729-2684fb322cb9/go.mod h1:NFuUusy/X4Tk21m21tcNUihnmp4OI7lXU7/xA+rYXkc= +k8s.io/component-base v0.16.4/go.mod h1:GYQ+4hlkEwdlpAp59Ztc4gYuFhdoZqiAJD1unYDJ3FM= 
+k8s.io/component-base v0.16.7/go.mod h1:ikdyfezOFMu5O0qJjy/Y9eXwj+fV3pVwdmt0ulVcIR0= +k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= k8s.io/component-base v0.18.0-rc.1/go.mod h1:NNlRaxZEdLqTs2+6yXiU2SHl8gKsbcy19Ii+Sfq53RM= k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= +k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= +k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.19.1/go.mod h1:b0vDKYa8EdJJ8dHUA6fGPj4z8taqGks5mfZvp3p/jVo= k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.19.4/go.mod h1:ZzuSLlsWhajIDEkKF73j64Gz/5o0AgON08FgRbEPI70= k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= +k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo= k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= k8s.io/component-helpers v0.20.0/go.mod h1:nx6NOtfSfGOxnSZsDJxpGbnsVuUA1UXpwDvZIrtigNk= +k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= k8s.io/component-helpers v0.21.2/go.mod h1:DbyFt/A0p6Cv+R5+QOGSJ5f5t4xDfI8Yb89a57DgJlQ= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= @@ -3696,9 +3884,7 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE= k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-aggregator v0.0.0-20190404125450-f5e124c822d6/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA= -k8s.io/kube-aggregator v0.18.0-rc.1/go.mod h1:35N7x/aAF8C5rEU78J+3pJ/k9v/8LypeWbzqBAEWA1I= k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= k8s.io/kube-aggregator v0.19.1/go.mod h1:oAj1kWeSDCh7sdzUOs6XXPn/jbzJY+yGGxDd0QyLJC8= k8s.io/kube-aggregator v0.20.0/go.mod h1:3Is/gzzWmhhG/rA3CpA1+eVye87lreBQDFGcAGT7gzo= @@ -3717,6 +3903,7 @@ k8s.io/kubectl 
v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4= k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= k8s.io/kubectl v0.19.1/go.mod h1:jZM7qucrDpQu05OAoSJk0yRRHRZNydya40dILYh8ODc= k8s.io/kubectl v0.20.0/go.mod h1:8x5GzQkgikz7M2eFGGuu6yOfrenwnw5g4RXOUgbjR1M= +k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= k8s.io/kubectl v0.21.2 h1:9XPCetvOMDqrIZZXb1Ei+g8t6KrIp9ENJaysQjUuLiE= k8s.io/kubectl v0.21.2/go.mod h1:PgeUclpG8VVmmQIl8zpLar3IQEpFc9mrmvlwY3CK1xo= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= @@ -3730,6 +3917,7 @@ k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= k8s.io/metrics v0.19.1/go.mod h1:O/ONCgXDITtJuMveKEDwZSfiqHOiMZTWmyLe/p1BoAA= k8s.io/metrics v0.19.4/go.mod h1:a0gvAzrxQPw2ouBqnXI7X9qlggpPkKAFgWU/Py+KZiU= k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= +k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= k8s.io/metrics v0.21.2/go.mod h1:wzlOINZMCtWq8dR9gHlyaOemmYlOpAoldEIXE82gAhI= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -3768,9 +3956,9 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jC mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= -open-cluster-management.io/addon-framework v0.0.0-20210909134218-e6e993872bb1 h1:VEQavVubWOg7nlsNBjBAv5RdfuEaJh5oIqxUtSh/7oU= -open-cluster-management.io/addon-framework v0.0.0-20210909134218-e6e993872bb1/go.mod h1:upMV+97asubQnQ1WY3W67nd4pIReZNR4beVkABXFjE0= -open-cluster-management.io/api v0.0.0-20210607023841-cd164385e2bb/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE= +open-cluster-management.io/addon-framework v0.0.0-20211014025435-1f42884cdd53 h1:ZpkemuH+/Bkv3u/CcxVpz/uvbqKEWeWmUNml0loQKqg= +open-cluster-management.io/addon-framework v0.0.0-20211014025435-1f42884cdd53/go.mod h1:IUjwRzTzJWdXBGtA1eX/gQffG7LBAPXtXrdquAWuZ+E= +open-cluster-management.io/api v0.0.0-20210908005819-815ac23c7308/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE= open-cluster-management.io/api v0.0.0-20210916013819-2e58cdb938f9 h1:ySrjJFbSuPbHEN0OvzTeQO8Bt93rjgvbce7lo2cQeZY= open-cluster-management.io/api v0.0.0-20210916013819-2e58cdb938f9/go.mod h1:9qiA5h/8kvPQnJEOlAPHVjRO9a1jCmDhGzvgMBvXEaE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -3787,34 +3975,35 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyz sigs.k8s.io/controller-runtime v0.0.0-20190520212815-96b67f231945/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.0-beta.2/go.mod h1:TSH2R0nSz4WAlUUlNnOFcOR/VUhfwBLlmtq2X6AiQCA= sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4= -sigs.k8s.io/controller-runtime v0.3.1-0.20191016212439-2df793d02076/go.mod h1:p2vzQ3RuSVv9YR4AcM0y8TKHQA+0oLXazKFt6Z0OdS8= -sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= 
-sigs.k8s.io/controller-runtime v0.5.1-0.20200330174416-a11a908d91e0/go.mod h1:j4echH3Y/UPHRpXS65rxGXujda8iWOheMQvDh1uNgaY= sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= +sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.1/go.mod h1:cTqsgnwSOsYS03XwySYZj8k6vf0+eC4FJRcCgQ9elb4= sigs.k8s.io/controller-runtime v0.9.3-0.20210709165254-650ea59f19cc/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= -sigs.k8s.io/controller-runtime v0.9.7 h1:DlHMlAyLpgEITVvNsuZqMbf8/sJl9HirmCZIeR5H9mQ= sigs.k8s.io/controller-runtime v0.9.7/go.mod h1:nExcHcQ2zvLMeoO9K7rOesGCmgu32srN5SENvpAEbGA= +sigs.k8s.io/controller-runtime v0.10.0 h1:HgyZmMpjUOrtkaFtCnfxsR1bGRuFoAczSNbn2MoKj5U= +sigs.k8s.io/controller-runtime v0.10.0/go.mod h1:GCdh6kqV6IY4LK0JLwX0Zm6g233RtVGdb/f0+KSfprg= sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= -sigs.k8s.io/controller-tools v0.2.2-0.20190930215132-4752ed2de7d2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= -sigs.k8s.io/controller-tools v0.2.9-0.20200331153640-3c5446d407dd/go.mod h1:D2LzYpGDYjxaAALDVYAwaqaKp2fNuyO5yfOBoU/cbBE= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= -sigs.k8s.io/controller-tools v0.3.1-0.20200617211605-651903477185/go.mod h1:JuPG+FXjAeZL7eGmTuXUJduEMlI2/kGqb0rUGlVi+Yo= -sigs.k8s.io/controller-tools v0.4.0/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= sigs.k8s.io/controller-tools v0.5.0/go.mod h1:JTsstrMpxs+9BUj6eGuAaEb6SDSPTeVtUyp0jmnAM/I= -sigs.k8s.io/kubebuilder v1.0.9-0.20200513134826-f07a0146a40b/go.mod h1:FGPx0hvP73+bapzWoy5ePuhAJYgJjrFbPxgvWyortM0= +sigs.k8s.io/controller-tools v0.6.0/go.mod h1:baRMVPrctU77F+rfAuH2uPqW93k6yQnZA2dhUOr7ihc= +sigs.k8s.io/kubebuilder v1.0.9-0.20200618125005-36aa113dbe99/go.mod h1:FGPx0hvP73+bapzWoy5ePuhAJYgJjrFbPxgvWyortM0= sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= +sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0= +sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk= sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo= +sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/kustomize/kyaml v0.10.17 h1:4zrV0ym5AYa0e512q7K3Wp1u7mzoWW0xR3UHJcGWGIg= 
sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/kustomize/pluginator v1.0.0/go.mod h1:i8HdU5FdH1zDjCKiFf5CNl7slsc0QffyKsY2OuPynJ0= @@ -3822,8 +4011,10 @@ sigs.k8s.io/kustomize/v3 v3.2.0/go.mod h1:ztX4zYc/QIww3gSripwF7TBOarBTm5BvyAMem0 sigs.k8s.io/kustomize/v3 v3.3.1 h1:UOhJqkRINRODnKq24DoDAr4gxk2z2p9iFJWDT3OLBx8= sigs.k8s.io/kustomize/v3 v3.3.1/go.mod h1:2ojB+51Z+YIBpEOknAFX3U8f0XXa94PFcfXPccDxAfg= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= +sigs.k8s.io/structured-merge-diff v1.0.1/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff v1.0.2 h1:WiMoyniAVAYm03w+ImfF9IE2G23GLR/SwDnQyaNZvPk= +sigs.k8s.io/structured-merge-diff v1.0.2/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= @@ -3832,7 +4023,6 @@ sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/loaders/dashboards/pkg/controller/dashboard_controller.go b/loaders/dashboards/pkg/controller/dashboard_controller.go index 9f7c222ff..4c7939b24 100644 --- a/loaders/dashboards/pkg/controller/dashboard_controller.go +++ b/loaders/dashboards/pkg/controller/dashboard_controller.go @@ -299,24 +299,25 @@ func updateDashboard(old, new interface{}, overwrite bool) { } else { klog.Infof("failed to create/update: %v", respStatusCode) } - } else { - if dashboard["title"] == homeDashboardTitle { - // get "id" value from response - re := regexp.MustCompile("\"id\":(\\d+),") - result := re.FindSubmatch(body) - if len(result) != 2 { - klog.Infof("failed to retrieve dashboard id") + return + } + + if dashboard["title"] == homeDashboardTitle { + // get "id" value from response + re := regexp.MustCompile("\"id\":(\\d+),") + result := re.FindSubmatch(body) + if len(result) != 2 { + klog.Infof("failed to retrieve dashboard id") + } else { + id, err := strconv.Atoi(strings.Trim(string(result[1]), " ")) + if err != nil { + klog.Error(err, "failed to parse dashboard id") } else { - id, err := strconv.Atoi(strings.Trim(string(result[1]), " ")) - if err != nil { - klog.Error(err, "failed to parse dashboard id") - } else { - setHomeDashboard(id) - } + setHomeDashboard(id) } } - klog.Info("Dashboard created/updated") } + klog.Info("Dashboard created/updated") } folderTitle = getDashboardCustomFolderTitle(old) diff --git 
a/operators/endpointmetrics/README.md b/operators/endpointmetrics/README.md index 7b9183709..3dee9d83c 100644 --- a/operators/endpointmetrics/README.md +++ b/operators/endpointmetrics/README.md @@ -86,7 +86,7 @@ xxxxxxxxxxxxxxxxxxxxxxxxxxx 4. Create the configmap named `observability-metrics-allowlist` in namespace `open-cluster-management-addon-observability`: ``` -$ kubectl apply -n open-cluster-management-addon-observability -f https://raw.githubusercontent.com/open-cluster-management/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml +$ kubectl apply -n open-cluster-management-addon-observability -f https://raw.githubusercontent.com/stolostron/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml ``` 5. Update the value of environment variable `COLLECTOR_IMAGE` in the endpoint-metrics-operator deployment, for example: `quay.io/stolostron/metrics-collector:2.3.0-SNAPSHOT-2021-04-08-09-07-10` @@ -137,4 +137,3 @@ observability-addon 137m ### View metrics in dashboard Access Grafana console in hub cluster at https://{YOUR_DOMAIN}/grafana, view the metrics in the dashboard named "ACM:Managed Cluster Monitoring" - diff --git a/operators/endpointmetrics/config/manager/manager.yaml b/operators/endpointmetrics/config/manager/manager.yaml index 449b34924..de7de68af 100644 --- a/operators/endpointmetrics/config/manager/manager.yaml +++ b/operators/endpointmetrics/config/manager/manager.yaml @@ -19,7 +19,7 @@ spec: containers: - name: endpoint-observability-operator image: quay.io/stolostron/endpoint-metrics-operator:latest - imagePullPolicy: Always + imagePullPolicy: IfNotPresent command: - endpoint-monitoring-operator env: diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator.go b/operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator.go new file mode 100644 index 000000000..d283463bb --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator.go @@ -0,0 +1,35 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project. +package observabilityendpoint + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" +) + +type evaluateFn func(metav1.LabelSelectorRequirement, ...interface{}) bool + +var evaluateFns = map[string]evaluateFn{ + "clusterType": evaluateClusterType, +} + +func evluateMatchExpression(expr metav1.LabelSelectorRequirement, params ...interface{}) bool { + if _, ok := evaluateFns[expr.Key]; !ok { + // return false if expr.key not defined + return false + } + return evaluateFns[expr.Key](expr, params...) +} + +func evaluateClusterType(expr metav1.LabelSelectorRequirement, params ...interface{}) bool { + switch expr.Operator { + case metav1.LabelSelectorOpIn: + return util.Contains(expr.Values, params[1].(string)) + case metav1.LabelSelectorOpNotIn: + return !util.Contains(expr.Values, params[1].(string)) + default: + // return false for unsupported/invalid operator + return false + } +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator_test.go new file mode 100644 index 000000000..d545f9fd3 --- /dev/null +++ b/operators/endpointmetrics/controllers/observabilityendpoint/match_evaluator_test.go @@ -0,0 +1,87 @@ +// Copyright (c) 2021 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project. +package observabilityendpoint + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestEvluateMatchExpression(t *testing.T) { + caseList := []struct { + name string + expr metav1.LabelSelectorRequirement + clusterType string + expectedResult bool + }{ + { + name: "unsupported key", + expr: metav1.LabelSelectorRequirement{ + Key: "test_key", + Operator: "In", + Values: []string{"SNO"}, + }, + expectedResult: false, + }, + { + name: "unsupported expr operator", + expr: metav1.LabelSelectorRequirement{ + Key: "clusterType", + Operator: "test_op", + Values: []string{"SNO"}, + }, + expectedResult: false, + }, + { + name: "filter non-SNO rule in SNO", + expr: metav1.LabelSelectorRequirement{ + Key: "clusterType", + Operator: "NotIn", + Values: []string{"SNO"}, + }, + clusterType: "SNO", + expectedResult: false, + }, + { + name: "filter SNO rule in non-SNO", + expr: metav1.LabelSelectorRequirement{ + Key: "clusterType", + Operator: "In", + Values: []string{"SNO"}, + }, + clusterType: "", + expectedResult: false, + }, + { + name: "select non-SNO rule in non-SNO", + expr: metav1.LabelSelectorRequirement{ + Key: "clusterType", + Operator: "NotIn", + Values: []string{"SNO"}, + }, + clusterType: "", + expectedResult: true, + }, + { + name: "select SNO rule in SNO", + expr: metav1.LabelSelectorRequirement{ + Key: "clusterType", + Operator: "In", + Values: []string{"SNO"}, + }, + clusterType: "SNO", + expectedResult: true, + }, + } + + for _, c := range caseList { + t.Run(c.name, func(t *testing.T) { + params := append([]interface{}{"id"}, c.clusterType) + r := evluateMatchExpression(c.expr, params...) + if r != c.expectedResult { + t.Fatalf("Wrong result for test %s, expected %v, got %v", c.name, c.expectedResult, r) + } + }) + } +} diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go index c6ac211bf..e8264d4a0 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector.go @@ -8,6 +8,7 @@ import ( "reflect" "sort" "strconv" + "strings" "time" "gopkg.in/yaml.v2" @@ -24,16 +25,17 @@ import ( ) const ( - metricsConfigMapKey = "metrics_list.yaml" - metricsCollectorName = "metrics-collector-deployment" - selectorKey = "component" - selectorValue = "metrics-collector" - caMounthPath = "/etc/serving-certs-ca-bundle" - caVolName = "serving-certs-ca-bundle" - mtlsCertName = "observability-controller-open-cluster-management.io-observability-signer-client-cert" - mtlsCaName = "observability-managed-cluster-certs" - limitBytes = 1073741824 - defaultInterval = "30s" + metricsConfigMapKey = "metrics_list.yaml" + metricsOcp311ConfigMapKey = "ocp311_metrics_list.yaml" + metricsCollectorName = "metrics-collector-deployment" + selectorKey = "component" + selectorValue = "metrics-collector" + caMounthPath = "/etc/serving-certs-ca-bundle" + caVolName = "serving-certs-ca-bundle" + mtlsCertName = "observability-controller-open-cluster-management.io-observability-signer-client-cert" + mtlsCaName = "observability-managed-cluster-certs" + limitBytes = 1073741824 + defaultInterval = "30s" ) const ( @@ -45,22 +47,9 @@ var ( promURL = "https://prometheus-k8s:9091" ) -type MetricsAllowlist struct { - NameList []string `yaml:"names"` - MatchList []string `yaml:"matches"` - RenameMap 
map[string]string `yaml:"renames"` - RuleList []Rule `yaml:"rules"` -} - -// Rule is the struct for recording rules and alert rules -type Rule struct { - Record string `yaml:"record"` - Expr string `yaml:"expr"` -} - func createDeployment(clusterID string, clusterType string, obsAddonSpec oashared.ObservabilityAddonSpec, - hubInfo operatorconfig.HubInfo, allowlist MetricsAllowlist, + hubInfo operatorconfig.HubInfo, allowlist operatorconfig.MetricsAllowlist, nodeSelector map[string]string, tolerations []corev1.Toleration, replicaCount int32) *appsv1.Deployment { interval := fmt.Sprint(obsAddonSpec.Interval) + "s" @@ -122,6 +111,9 @@ func createDeployment(clusterID string, clusterType string, "/usr/bin/metrics-collector", "--from=$(FROM)", "--to-upload=$(TO)", + "--to-upload-ca=/tlscerts/ca/ca.crt", + "--to-upload-cert=/tlscerts/certs/tls.crt", + "--to-upload-key=/tlscerts/certs/tls.key", "--interval=" + interval, "--limit-bytes=" + strconv.Itoa(limitBytes), fmt.Sprintf("--label=\"cluster=%s\"", hubInfo.ClusterName), @@ -149,9 +141,37 @@ func createDeployment(clusterID string, clusterType string, for _, k := range renamekeys { commands = append(commands, fmt.Sprintf("--rename=\"%s=%s\"", k, allowlist.RenameMap[k])) } - for _, rule := range allowlist.RuleList { - commands = append(commands, fmt.Sprintf("--recordingrule={\"name\":\"%s\",\"query\":\"%s\"}", rule.Record, rule.Expr)) + for _, rule := range allowlist.RecordingRuleList { + commands = append( + commands, + fmt.Sprintf("--recordingrule={\"name\":\"%s\",\"query\":\"%s\"}", rule.Record, rule.Expr), + ) + } + + for _, group := range allowlist.CollectRuleGroupList { + if group.Selector.MatchExpression != nil { + for _, expr := range group.Selector.MatchExpression { + if !evluateMatchExpression(expr, clusterID, clusterType, obsAddonSpec, hubInfo, + allowlist, nodeSelector, tolerations, replicaCount) { + continue + } + for _, rule := range group.CollectRuleList { + matchList := []string{} + for _, match := range rule.MatchList { + matchList = append(matchList, `"`+strings.ReplaceAll(match, `"`, `\"`)+`"`) + } + matchListStr := "[" + strings.Join(matchList, ",") + "]" + nameListStr := `["` + strings.Join(rule.NameList, `","`) + `"]` + commands = append( + commands, + fmt.Sprintf("--collectrule={\"name\":\"%s\",\"expr\":\"%s\",\"for\":\"%s\",\"names\":%v,\"matches\":%v}", + rule.Collect, rule.Expr, rule.For, nameListStr, matchListStr), + ) + } + } + } } + from := promURL if !installPrometheus { from = ocpPromURL @@ -195,7 +215,7 @@ func createDeployment(clusterID string, clusterType string, }, }, VolumeMounts: mounts, - ImagePullPolicy: corev1.PullAlways, + ImagePullPolicy: corev1.PullIfNotPresent, }, }, Volumes: volumes, @@ -215,10 +235,18 @@ func updateMetricsCollector(ctx context.Context, client client.Client, obsAddonS hubInfo operatorconfig.HubInfo, clusterID string, clusterType string, replicaCount int32, forceRestart bool) (bool, error) { - list := getMetricsAllowlist(ctx, client) + list := getMetricsAllowlist(ctx, client, clusterType) endpointDeployment := getEndpointDeployment(ctx, client) - deployment := createDeployment(clusterID, clusterType, obsAddonSpec, hubInfo, list, - endpointDeployment.Spec.Template.Spec.NodeSelector, endpointDeployment.Spec.Template.Spec.Tolerations, replicaCount) + deployment := createDeployment( + clusterID, + clusterType, + obsAddonSpec, + hubInfo, + list, + endpointDeployment.Spec.Template.Spec.NodeSelector, + endpointDeployment.Spec.Template.Spec.Tolerations, + replicaCount, + ) found := 
&appsv1.Deployment{} err := client.Get(ctx, types.NamespacedName{Name: metricsCollectorName, Namespace: namespace}, found) @@ -276,8 +304,8 @@ func deleteMetricsCollector(ctx context.Context, client client.Client) error { func int32Ptr(i int32) *int32 { return &i } -func getMetricsAllowlist(ctx context.Context, client client.Client) MetricsAllowlist { - l := &MetricsAllowlist{} +func getMetricsAllowlist(ctx context.Context, client client.Client, clusterType string) operatorconfig.MetricsAllowlist { + l := &operatorconfig.MetricsAllowlist{} cm := &corev1.ConfigMap{} err := client.Get(ctx, types.NamespacedName{Name: operatorconfig.AllowlistConfigMapName, Namespace: namespace}, cm) @@ -285,7 +313,11 @@ func getMetricsAllowlist(ctx context.Context, client client.Client) MetricsAllow log.Error(err, "Failed to get configmap") } else { if cm.Data != nil { - err = yaml.Unmarshal([]byte(cm.Data[metricsConfigMapKey]), l) + configmapKey := metricsConfigMapKey + if clusterType == "ocp3" { + configmapKey = metricsOcp311ConfigMapKey + } + err = yaml.Unmarshal([]byte(cm.Data[configmapKey]), l) if err != nil { log.Error(err, "Failed to unmarshal data in configmap") } diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go index 3a0b37961..ff584a676 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/metrics_collector_test.go @@ -30,9 +30,22 @@ names: - b matches: - c -rules: +recording_rules: - record: f expr: g +collect_rules: + - name: h + selector: + matchExpressions: + - key: clusterType + operator: NotIn + values: ["SNO"] + rules: + - collect: j + expr: k + for: 1m + names: + - l `}, } } diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go index acf4238b0..aefe9afdd 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/observabilityaddon_controller.go @@ -106,7 +106,11 @@ func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.R // retrieve the hubInfo hubSecret := &corev1.Secret{} - err = r.Client.Get(ctx, types.NamespacedName{Name: operatorconfig.HubInfoSecretName, Namespace: namespace}, hubSecret) + err = r.Client.Get( + ctx, + types.NamespacedName{Name: operatorconfig.HubInfoSecretName, Namespace: namespace}, + hubSecret, + ) if err != nil { return ctrl.Result{}, err } @@ -197,7 +201,15 @@ func (r *ObservabilityAddonReconciler) Reconcile(ctx context.Context, req ctrl.R if req.Name == mtlsCertName || req.Name == mtlsCaName || req.Name == caConfigmapName { forceRestart = true } - created, err := updateMetricsCollector(ctx, r.Client, obsAddon.Spec, *hubInfo, clusterID, clusterType, 1, forceRestart) + created, err := updateMetricsCollector( + ctx, + r.Client, + obsAddon.Spec, + *hubInfo, clusterID, + clusterType, + 1, + forceRestart) + if err != nil { util.ReportStatus(ctx, r.Client, obsAddon, "Degraded") return ctrl.Result{}, err @@ -234,9 +246,11 @@ func (r *ObservabilityAddonReconciler) initFinalization( return false, err } - // Should we return bool from the delete functions for crb and cm? What is it used for? Should we use the bool before removing finalizer? 
- // SHould we return true if metricscollector is not found as that means metrics collector is not present? - // Moved this part up as we need to clean up cm and crb before we remove the finalizer - is that the right way to do it? + // Should we return bool from the delete functions for crb and cm? What + // is it used for? Should we use the bool before removing finalizer? + // Should we return true if metricscollector is not found as that means + // metrics collector is not present? Moved this part up as we need to clean + // up cm and crb before we remove the finalizer - is that the right way to do it? if !installPrometheus { err = deleteMonitoringClusterRoleBinding(ctx, r.Client) if err != nil { diff --git a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go index 800b5bec3..a85d0e622 100644 --- a/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go +++ b/operators/endpointmetrics/controllers/observabilityendpoint/ocp_monitoring_config.go @@ -31,7 +31,12 @@ const ( ) // createHubAmRouterCASecret creates the secret that contains CA of the Hub's Alertmanager Route -func createHubAmRouterCASecret(ctx context.Context, hubInfo *operatorconfig.HubInfo, client client.Client, targetNamespace string) error { +func createHubAmRouterCASecret( + ctx context.Context, + hubInfo *operatorconfig.HubInfo, + client client.Client, + targetNamespace string) error { + hubAmRouterCA := hubInfo.AlertmanagerRouterCA dataMap := map[string][]byte{hubAmRouterCASecretKey: []byte(hubAmRouterCA)} hubAmRouterCASecret := &corev1.Secret{ @@ -219,7 +224,8 @@ func createOrUpdateClusterMonitoringConfig( installProm bool) error { targetNamespace := promNamespace if installProm { - // for *KS, the hub CA and alertmanager access token should be created in namespace: open-cluster-management-addon-observability + // for *KS, the hub CA and alertmanager access token should be created + // in namespace: open-cluster-management-addon-observability targetNamespace = namespace } @@ -298,7 +304,13 @@ func createOrUpdateClusterMonitoringConfig( log.Info("configmap already exists, check if it needs update", "name", clusterMonitoringConfigName) foundClusterMonitoringConfigurationYAMLString, ok := found.Data[clusterMonitoringConfigDataKey] if !ok { - log.Info("configmap data doesn't contain key, try to update it", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + log.Info( + "configmap data doesn't contain key, try to update it", + "name", + clusterMonitoringConfigName, + "key", + clusterMonitoringConfigDataKey, + ) // replace config.yaml in configmap found.Data[clusterMonitoringConfigDataKey] = string(newClusterMonitoringConfigurationYAMLBytes) err = client.Update(ctx, found) @@ -313,7 +325,9 @@ func createOrUpdateClusterMonitoringConfig( log.Info("configmap already exists and key config.yaml exists, check if the value needs update", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) - foundClusterMonitoringConfigurationJSONBytes, err := yaml.YAMLToJSON([]byte(foundClusterMonitoringConfigurationYAMLString)) + foundClusterMonitoringConfigurationJSONBytes, err := yaml.YAMLToJSON( + []byte(foundClusterMonitoringConfigurationYAMLString), + ) if err != nil { log.Error(err, "failed to transform YAML to JSON", "YAML", foundClusterMonitoringConfigurationYAMLString) return err @@ -362,7 +376,9 @@ func createOrUpdateClusterMonitoringConfig(
log.Error(err, "failed to marshal the cluster monitoring config") return err } - updatedclusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML(updatedClusterMonitoringConfigurationJSONBytes) + updatedclusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML( + updatedClusterMonitoringConfigurationJSONBytes, + ) if err != nil { log.Error(err, "failed to transform JSON to YAML", "JSON", updatedClusterMonitoringConfigurationJSONBytes) return err @@ -382,7 +398,8 @@ func createOrUpdateClusterMonitoringConfig( func revertClusterMonitoringConfig(ctx context.Context, client client.Client, installProm bool) error { targetNamespace := promNamespace if installProm { - // for *KS, the hub CA and alertmanager access token are not created in namespace: open-cluster-management-addon-observability + // for *KS, the hub CA and alertmanager access token are not created in namespace: + // open-cluster-management-addon-observability targetNamespace = namespace } @@ -416,7 +433,13 @@ func revertClusterMonitoringConfig(ctx context.Context, client client.Client, in log.Info("configmap exists, check if it needs revert", "name", clusterMonitoringConfigName) foundClusterMonitoringConfigurationYAML, ok := found.Data[clusterMonitoringConfigDataKey] if !ok { - log.Info("configmap data doesn't contain key, no need action", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + log.Info( + "configmap data doesn't contain key, no need action", + "name", + clusterMonitoringConfigName, + "key", + clusterMonitoringConfigDataKey, + ) return nil } foundClusterMonitoringConfigurationJSON, err := yaml.YAMLToJSON([]byte(foundClusterMonitoringConfigurationYAML)) @@ -425,7 +448,13 @@ func revertClusterMonitoringConfig(ctx context.Context, client client.Client, in return err } - log.Info("configmap exists and key config.yaml exists, check if the value needs revert", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + log.Info( + "configmap exists and key config.yaml exists, check if the value needs revert", + "name", + clusterMonitoringConfigName, + "key", + clusterMonitoringConfigDataKey, + ) foundClusterMonitoringConfiguration := &cmomanifests.ClusterMonitoringConfiguration{} if err := json.Unmarshal([]byte(foundClusterMonitoringConfigurationJSON), foundClusterMonitoringConfiguration); err != nil { log.Error(err, "failed to marshal the cluster monitoring config") @@ -433,7 +462,13 @@ func revertClusterMonitoringConfig(ctx context.Context, client client.Client, in } if foundClusterMonitoringConfiguration.PrometheusK8sConfig == nil { - log.Info("configmap data doesn't key: prometheusK8s, no need action", "name", clusterMonitoringConfigName, "key", clusterMonitoringConfigDataKey) + log.Info( + "configmap data doesn't key: prometheusK8s, no need action", + "name", + clusterMonitoringConfigName, + "key", + clusterMonitoringConfigDataKey, + ) return nil } else { // check if externalLabels exists @@ -486,7 +521,9 @@ func revertClusterMonitoringConfig(ctx context.Context, client client.Client, in log.Error(err, "failed to marshal the cluster monitoring config") return err } - updatedClusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML(updatedClusterMonitoringConfigurationJSONBytes) + updatedClusterMonitoringConfigurationYAMLBytes, err := yaml.JSONToYAML( + updatedClusterMonitoringConfigurationJSONBytes, + ) if err != nil { log.Error(err, "failed to transform JSON to YAML", "JSON", updatedClusterMonitoringConfigurationJSONBytes) return err diff --git 
a/operators/endpointmetrics/main.go b/operators/endpointmetrics/main.go index ccbc1b6a0..45983624c 100644 --- a/operators/endpointmetrics/main.go +++ b/operators/endpointmetrics/main.go @@ -13,6 +13,7 @@ import ( // to ensure that exec-entrypoint and run can make use of them. "github.com/IBM/controller-filtered-cache/filteredcache" ocinfrav1 "github.com/openshift/api/config/v1" + prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" @@ -42,6 +43,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(oav1beta1.AddToScheme(scheme)) utilruntime.Must(ocinfrav1.AddToScheme(scheme)) + utilruntime.Must(prometheusv1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } diff --git a/operators/endpointmetrics/manifests/prometheus/crd/alertmanager_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/alertmanager_crd_0_53_1.yaml new file mode 100644 index 000000000..f48b94bc0 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/alertmanager_crd_0_53_1.yaml @@ -0,0 +1,6043 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: alertmanagers.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Alertmanager + listKind: AlertmanagerList + plural: alertmanagers + singular: alertmanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Alertmanager + jsonPath: .spec.version + name: Version + type: string + - description: The desired replicas number of Alertmanagers + jsonPath: .spec.replicas + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Alertmanager describes an Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Alertmanager + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalPeers: + description: AdditionalPeers allows injecting a set of additional + Alertmanagers to peer with to form a highly available cluster. + items: + type: string + type: array + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. 
due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. 
A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. 
null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. 
+ This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. 
null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. 
+ This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alertmanagerConfigNamespaceSelector: + description: Namespaces to be selected for AlertmanagerConfig discovery. + If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + alertmanagerConfigSelector: + description: AlertmanagerConfigs to be selected for to merge and configure + Alertmanager with. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + baseImage: + description: 'Base image that is used to deploy pods, without tag. + Deprecated: use ''image'' instead' + type: string + clusterAdvertiseAddress: + description: 'ClusterAdvertiseAddress is the explicit address to advertise + in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. + [1] RFC1918: https://tools.ietf.org/html/rfc1918' + type: string + clusterGossipInterval: + description: Interval between gossip attempts. + type: string + clusterPeerTimeout: + description: Timeout for cluster peering. + type: string + clusterPushpullInterval: + description: Interval between pushpull attempts. + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Alertmanager object, which shall be mounted into the Alertmanager + Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + items: + type: string + type: array + configSecret: + description: ConfigSecret is the name of a Kubernetes Secret in the + same namespace as the Alertmanager object, which contains configuration + for this Alertmanager instance. Defaults to 'alertmanager-' + The secret is mounted into /etc/alertmanager/config. + type: string + containers: + description: 'Containers allows injecting additional containers. This + is meant to allow adding an authentication proxy to an Alertmanager + pod. Containers described here modify an operator generated container + if they share the same name and modifications are done via a strategic + merge patch. The current container names are: `alertmanager` and + `config-reloader`. Overriding containers is entirely outside the + scope of what the maintainers will support and by doing so, you + accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run + within a pod. 
+ properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
+ This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. 
Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. 
If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + externalUrl: + description: The external URL the Alertmanager instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Alertmanager is not served from root of a DNS name. + type: string + forceEnableClusterMode: + description: ForceEnableClusterMode ensures Alertmanager does not + deactivate the cluster mode when running with a single replica. + Use case is e.g. spanning an Alertmanager cluster across Kubernetes + clusters with a single replica in each. + type: boolean + image: + description: Image if specified has precedence over baseImage, tag + and sha combinations. Specifying the version is still necessary + to ensure the Prometheus Operator knows what version of Alertmanager + is being configured. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same + namespace to use for pulling prometheus and alertmanager images + from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Alertmanager configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart + of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching + is entirely outside the scope of what the maintainers will support + and by doing so, you accept that this behaviour may break at any + time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
+ Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. 
You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. 
If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. 
If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. 
If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. 
+ type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Alertmanager server listen on loopback, + so that it does not bind against the Pod IP. Note this is only for + the Alertmanager UI, not the gossip communication. + type: boolean + logFormat: + description: Log format for Alertmanager to be configured with. + type: string + logLevel: + description: Log level for Alertmanager to be configured with. + type: string + minReadySeconds: + description: Minimum number of seconds for which a newly created pod + should be ready without any of its container crashing for it to + be considered available. Defaults to 0 (pod will be considered available + as soon as it is ready) This is an alpha field and requires enabling + StatefulSetMinReadySeconds feature gate. + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + paused: + description: If set to true all actions on the underlying managed + objects are not going to be performed, except for delete actions. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are + propagated to the alertmanager pods.
+ properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow a + client to request the generation of an appropriate name automatically. + Name is primarily intended for creation idempotence and configuration + definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + portName: + description: Port name used for the pods and governing service. This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + replicas: + description: Size is the expected size of the alertmanager cluster. + The controller will eventually make the size of the running cluster + equal to the expected size. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + retention: + description: Time duration Alertmanager shall retain data for. Default + is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` + (milliseconds seconds minutes hours). + type: string + routePrefix: + description: The route prefix Alertmanager registers HTTP handlers + for. This is useful, if using ExternalURL and a proxy is rewriting + HTTP routes of a request, and the actual ExternalURL is still true, + but the server serves requests under a different route prefix. For + example for use with `kubectl proxy`. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as + the Alertmanager object, which shall be mounted into the Alertmanager + Pods. The Secrets are mounted into /etc/alertmanager/secrets/. 
+ items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. 
Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. Note + that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount + to use to run the Prometheus Pods. + type: string + sha: + description: 'SHA of Alertmanager container image to be deployed. 
+ Defaults to the value of `version`. Similar to a tag, but the SHA + explicitly deploys an immutable container image. Version and Tag + are ignored if SHA is set. Deprecated: use ''image'' instead. The + image digest can be specified as part of the image URL.' + type: string + storage: + description: Storage is the definition of how storage will be used + by the Alertmanager instances. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be disabled by default + in a future release, this option will become unnecessary. DisableMountSubPath + allows to remove any subPath usage in volume mounts.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: 'EphemeralVolumeSource to be used by the Prometheus + StatefulSets. This is a beta field in k8s 1.21, for lower versions, + starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC will + be deleted together with the pod. The name of the PVC will + be `<pod name>-<volume name>` where `<volume name>` is the + name from the `PodSpec.Volumes` array entry. Pod validation + will reject the pod if the concatenated name is not valid + for a PVC (for example, too long). \n An existing PVC with + that name that is not owned by the pod will *not* be used + for the pod to avoid using an unrelated volume by mistake. + Starting the pod is then blocked until the unrelated PVC + is removed. If such a pre-created PVC is meant to be used + by the pod, the PVC has to be updated with an owner reference + to the pod once the pod exists. Normally this should not + be necessary, but it may be useful when manually reconstructing + a broken cluster. \n This field is read-only and no changes + will be made by Kubernetes to the PVC after it has been + created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will + be copied into the PVC when creating it. No other fields + are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim.
+ The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified data + source. If the AnyVolumeDataSource feature gate + is enabled, this field will always have the same + contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API + group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the DataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the other + is non-empty. There are two important differences + between DataSource and DataSourceRef: * While DataSource + only allows two specific types of objects, DataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by + the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is + implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST + resource this object represents. Servers may infer this + from the endpoint the client submits requests to. Cannot + be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to + an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value + map stored with a resource that may be set by external + tools to store and retrieve arbitrary metadata. They + are not queryable and should be preserved when modifying + objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. + Is required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be + updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of + a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on the + contents of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will always have + the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API group + (non core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. 
+ This field will replace the functionality of the DataSource + field and as such if both fields are non-empty, they + must have the same value. For backwards compatibility, + both fields (DataSource and DataSourceRef) will be set + to the same value automatically if one of them is empty + and the other is non-empty. There are two important + differences between DataSource and DataSourceRef: * + While DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as well + as PersistentVolumeClaim objects. * While DataSource + ignores disallowed values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed value + is specified. (Alpha) Using this field requires the + AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the status + field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. 
If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: The storage resource within AllocatedResources + tracks the capacity allocated to a PVC. It may be larger + than the actual capacity when a volume expansion operation + is requested. For storage quota, the larger value from + allocatedResources and PVC.spec.resources is used. If + allocatedResources is not set, PVC.spec.resources alone + is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower + than the requested capacity. This is an alpha field + and requires enabling RecoverVolumeExpansionFailure + feature. + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails + details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. 
+ type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is + being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType + is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + resizeStatus: + description: ResizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: string + type: object + type: object + type: object + tag: + description: 'Tag of Alertmanager container image to be deployed. + Defaults to the value of `version`. Version is ignored if Tag is + set. Deprecated: use ''image'' instead. The image tag can be specified + as part of the image URL.' + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to.
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + For example, in a 3-zone cluster, MaxSkew is set to 1, and + pods with the same labelSelector spread as 1/1/0: | zone1 + | zone2 | zone3 | | P | P | | - if MaxSkew is + 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + scheduling it onto zone1(zone2) would make the ActualSkew(2-0) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number of pods into + each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + version: + description: Version the cluster should be on. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition.
VolumeMounts specified will + be appended to other VolumeMounts in the alertmanager container, + that are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on + the output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' 
+ type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". 
If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. 
If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have the + same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to + populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) or + a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + DataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While + DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as + well as PersistentVolumeClaim objects. * While + DataSource ignores disallowed values (dropping + them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. 
+ type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. 
+ type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". 
+ type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the Alertmanager cluster. + Read-only. Not included when requesting from the apiserver, only from + the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Alertmanager cluster. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this + Alertmanager cluster (their labels match the selector). 
+ format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Alertmanager + cluster. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Alertmanager cluster that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/alertmanagerconfig_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/alertmanagerconfig_crd_0_53_1.yaml new file mode 100644 index 000000000..97975e135 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/alertmanagerconfig_crd_0_53_1.yaml @@ -0,0 +1,2846 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: alertmanagerconfigs.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: AlertmanagerConfig + listKind: AlertmanagerConfigList + plural: alertmanagerconfigs + singular: alertmanagerconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: AlertmanagerConfig defines a namespaced AlertmanagerConfig to + be aggregated across multiple namespaces configuring one Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AlertmanagerConfigSpec is a specification of the desired + behavior of the Alertmanager configuration. By definition, the Alertmanager + configuration only applies to alerts for which the `namespace` label + is equal to the namespace of the AlertmanagerConfig resource. + properties: + inhibitRules: + description: List of inhibition rules. The rules will only apply to + alerts matching the resource’s namespace. + items: + description: InhibitRule defines an inhibition rule that allows + to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule + properties: + equal: + description: Labels that must have an equal value in the source + and target alert for the inhibition to take effect. + items: + type: string + type: array + sourceMatch: + description: Matchers for which one or more alerts have to exist + for the inhibition to take effect. The operator enforces that + the alert matches the resource’s namespace. + items: + description: Matcher defines how to match on alert's labels. 
+ properties: + matchType: + description: Match operation available with AlertManager + >= v0.22.0 and takes precedence over Regex (deprecated) + if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: Whether to match on equality (false) or regular-expression + (true). Deprecated as of AlertManager >= v0.22.0 where + a user should use MatchType instead. + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + targetMatch: + description: Matchers that have to be fulfilled in the alerts + to be muted. The operator enforces that the alert matches + the resource’s namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager + >= v0.22.0 and takes precedence over Regex (deprecated) + if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: Whether to match on equality (false) or regular-expression + (true). Deprecated as of AlertManager >= v0.22.0 where + a user should use MatchType instead. + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + type: object + type: array + muteTimeIntervals: + description: List of MuteTimeInterval specifying when the routes should + be muted. + items: + description: MuteTimeInterval specifies the periods in time when + notifications will be muted + properties: + name: + description: Name of the time interval + type: string + timeIntervals: + description: TimeIntervals is a list of TimeInterval + items: + description: TimeInterval describes intervals of time + properties: + daysOfMonth: + description: DaysOfMonth is a list of DayOfMonthRange + items: + description: DayOfMonthRange is an inclusive range of + days of the month beginning at 1 + properties: + end: + description: End of the inclusive range + maximum: 31 + minimum: -31 + type: integer + start: + description: Start of the inclusive range + maximum: 31 + minimum: -31 + type: integer + type: object + type: array + months: + description: Months is a list of MonthRange + items: + description: MonthRange is an inclusive range of months + of the year beginning in January Months can be specified + by name (e.g 'January') by numerical month (e.g '1') + or as an inclusive range (e.g 'January:March', '1:3', + '1:March') + pattern: ^((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12]))$)|$) + type: string + type: array + times: + description: Times is a list of TimeRange + items: + description: TimeRange defines a start and end time + in 24hr format + properties: + endTime: + description: EndTime is the end time in 24hr format. + pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$) + type: string + startTime: + description: StartTime is the start time in 24hr + format. 
+ pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$) + type: string + type: object + type: array + weekdays: + description: Weekdays is a list of WeekdayRange + items: + description: WeekdayRange is an inclusive range of days + of the week beginning on Sunday Days can be specified + by name (e.g 'Sunday') or as an inclusive range (e.g + 'Monday:Friday') + pattern: ^((?i)sun|mon|tues|wednes|thurs|fri|satur)day(?:((:(sun|mon|tues|wednes|thurs|fri|satur)day)$)|$) + type: string + type: array + years: + description: Years is a list of YearRange + items: + description: YearRange is an inclusive range of years + pattern: ^2\d{3}(?::2\d{3}|$) + type: string + type: array + type: object + type: array + type: object + type: array + receivers: + description: List of receivers. + items: + description: Receiver defines one or more notification integrations. + properties: + emailConfigs: + description: List of Email configurations. + items: + description: EmailConfig configures notifications via Email. + properties: + authIdentity: + description: The identity to use for authentication. + type: string + authPassword: + description: The secret's key that contains the password + to use for authentication. The secret needs to be in + the same namespace as the AlertmanagerConfig object + and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + authSecret: + description: The secret's key that contains the CRAM-MD5 + secret. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + authUsername: + description: The username to use for authentication. + type: string + from: + description: The sender address. + type: string + headers: + description: Further headers email header key/value pairs. + Overrides any headers previously set by the notification + implementation. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + hello: + description: The hostname to identify to the SMTP server. + type: string + html: + description: The HTML body of the email notification. + type: string + requireTLS: + description: The SMTP TLS requirement. Note that Go does + not support unencrypted connections to remote SMTP endpoints. + type: boolean + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + smarthost: + description: The SMTP host and port through which emails + are sent. E.g. 
example.com:25 + type: string + text: + description: The text body of the email notification. + type: string + tlsConfig: + description: TLS configuration + properties: + ca: + description: Struct containing the CA cert to use + for the targets. + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file + for the targets. + properties: + configMap: + description: ConfigMap containing data to use + for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for + the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file + for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + to: + description: The email address to send notifications to. + type: string + type: object + type: array + name: + description: Name of the receiver. Must be unique across all + items from the list. + minLength: 1 + type: string + opsgenieConfigs: + description: List of OpsGenie configurations. 
+ items: + description: OpsGenieConfig configures notifications via OpsGenie. + See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config + properties: + apiKey: + description: The secret's key that contains the OpsGenie + API key. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + apiURL: + description: The URL to send OpsGenie API requests to. + type: string + description: + description: Description of the incident. + type: string + details: + description: A set of arbitrary key/value pairs that provide + further detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. 
+ properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: Alert text limited to 130 characters. + type: string + note: + description: Additional alert note. + type: string + priority: + description: Priority level of alert. Possible values + are P1, P2, P3, P4, and P5. + type: string + responders: + description: List of responders responsible for notifications. + items: + description: OpsGenieConfigResponder defines a responder + to an incident. One of `id`, `name` or `username` + has to be defined. + properties: + id: + description: ID of the responder. + type: string + name: + description: Name of the responder. + type: string + type: + description: Type of responder. + minLength: 1 + type: string + username: + description: Username of the responder. + type: string + required: + - type + type: object + type: array + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + source: + description: Backlink to the sender of the notification. + type: string + tags: + description: Comma separated list of tags attached to + the notifications. + type: string + type: object + type: array + pagerdutyConfigs: + description: List of PagerDuty configurations. + items: + description: PagerDutyConfig configures notifications via + PagerDuty. See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config + properties: + class: + description: The class/type of the event. + type: string + client: + description: Client identification. + type: string + clientURL: + description: Backlink to the sender of notification. + type: string + component: + description: The part or component of the affected system + that is broken. + type: string + description: + description: Description of the incident. + type: string + details: + description: Arbitrary key/value pairs that provide further + detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + group: + description: A cluster or grouping of sources. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + pagerDutyImageConfigs: + description: A list of image details to attach that provide + further detail about an incident. + items: + description: PagerDutyImageConfig attaches images to + an incident + properties: + alt: + description: Alt is the optional alternative text + for the image. + type: string + href: + description: Optional URL; makes the image a clickable + link. + type: string + src: + description: Src of the image being attached to + the incident + type: string + type: object + type: array + pagerDutyLinkConfigs: + description: A list of link details to attach that provide + further detail about an incident. + items: + description: PagerDutyLinkConfig attaches text links + to an incident + properties: + alt: + description: Text that describes the purpose of + the link, and can be used as the link's text. + type: string + href: + description: Href is the URL of the link to be attached + type: string + type: object + type: array + routingKey: + description: The secret's key that contains the PagerDuty + integration key (when using Events API v2). Either this + field or `serviceKey` needs to be defined. The secret + needs to be in the same namespace as the AlertmanagerConfig + object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + serviceKey: + description: The secret's key that contains the PagerDuty + service key (when using integration type "Prometheus"). + Either this field or `routingKey` needs to be defined. + The secret needs to be in the same namespace as the + AlertmanagerConfig object and accessible by the Prometheus + Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + severity: + description: Severity of the incident. + type: string + url: + description: The URL to send requests to. + type: string + type: object + type: array + pushoverConfigs: + description: List of Pushover configurations. + items: + description: PushoverConfig configures notifications via Pushover. + See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config + properties: + expire: + description: How long your notification will continue + to be retried for, unless the user acknowledges the + notification. + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + type: string + html: + description: Whether notification message is HTML or plain + text. + type: boolean + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. 
+ type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: Notification message. + type: string + priority: + description: Priority, see https://pushover.net/api#priority + type: string + retry: + description: How often the Pushover servers will send + the same notification to the user. Must be at least + 30 seconds. + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + sound: + description: The name of one of the sounds supported by + device clients to override the user's default sound + choice + type: string + title: + description: Notification title. + type: string + token: + description: The secret's key that contains the registered + application’s API token, see https://pushover.net/apps. + The secret needs to be in the same namespace as the + AlertmanagerConfig object and accessible by the Prometheus + Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + url: + description: A supplementary URL shown alongside the message. + type: string + urlTitle: + description: A title for supplementary URL, otherwise + just the URL is shown + type: string + userKey: + description: The secret's key that contains the recipient + user’s user key. The secret needs to be in the same + namespace as the AlertmanagerConfig object and accessible + by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + type: array + slackConfigs: + description: List of Slack configurations. 
+ items: + description: SlackConfig configures notifications via Slack. + See https://prometheus.io/docs/alerting/latest/configuration/#slack_config + properties: + actions: + description: A list of Slack actions that are sent with + each notification. + items: + description: SlackAction configures a single Slack action + that is sent with each notification. See https://api.slack.com/docs/message-attachments#action_fields + and https://api.slack.com/docs/message-buttons for + more information. + properties: + confirm: + description: SlackConfirmationField protect users + from destructive actions or particularly distinguished + decisions by asking them to confirm their button + click one more time. See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields + for more information. + properties: + dismissText: + type: string + okText: + type: string + text: + minLength: 1 + type: string + title: + type: string + required: + - text + type: object + name: + type: string + style: + type: string + text: + minLength: 1 + type: string + type: + minLength: 1 + type: string + url: + type: string + value: + type: string + required: + - text + - type + type: object + type: array + apiURL: + description: The secret's key that contains the Slack + webhook URL. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + callbackId: + type: string + channel: + description: The channel or user to send notifications + to. + type: string + color: + type: string + fallback: + type: string + fields: + description: A list of Slack fields that are sent with + each notification. + items: + description: SlackField configures a single Slack field + that is sent with each notification. Each field must + contain a title, value, and optionally, a boolean + value to indicate if the field is short enough to + be displayed next to other fields designated as short. + See https://api.slack.com/docs/message-attachments#fields + for more information. + properties: + short: + type: boolean + title: + minLength: 1 + type: string + value: + minLength: 1 + type: string + required: + - title + - value + type: object + type: array + footer: + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + iconEmoji: + type: string + iconURL: + type: string + imageURL: + type: string + linkNames: + type: boolean + mrkdwnIn: + items: + type: string + type: array + pretext: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + shortFields: + type: boolean + text: + type: string + thumbURL: + type: string + title: + type: string + titleLink: + type: string + username: + type: string + type: object + type: array + victoropsConfigs: + description: List of VictorOps configurations. + items: + description: VictorOpsConfig configures notifications via + VictorOps. See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config + properties: + apiKey: + description: The secret's key that contains the API key + to use when talking to the VictorOps API. The secret + needs to be in the same namespace as the AlertmanagerConfig + object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + apiUrl: + description: The VictorOps API URL. 
+ type: string + customFields: + description: Additional custom fields for notification. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + entityDisplayName: + description: Contains summary of the alerted problem. + type: string + httpConfig: + description: The HTTP client's configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. 
+ type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + messageType: + description: Describes the behavior of the alert (CRITICAL, + WARNING, INFO). + type: string + monitoringTool: + description: The monitoring tool the state message is + from. + type: string + routingKey: + description: A key used to map the alert to a team. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. 
+ type: boolean + stateMessage: + description: Contains long explanation of the alerted + problem. + type: string + type: object + type: array + webhookConfigs: + description: List of webhook configurations. + items: + description: WebhookConfig configures notifications via a + generic receiver supporting the webhook payload. See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config + properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. 
+ type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + maxAlerts: + description: Maximum number of alerts to be sent per webhook + message. When 0, all alerts are included. + format: int32 + minimum: 0 + type: integer + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + url: + description: The URL to send HTTP POST requests to. `urlSecret` + takes precedence over `url`. 
One of `urlSecret` and + `url` should be defined. + type: string + urlSecret: + description: The secret's key that contains the webhook + URL to send HTTP requests to. `urlSecret` takes precedence + over `url`. One of `urlSecret` and `url` should be defined. + The secret needs to be in the same namespace as the + AlertmanagerConfig object and accessible by the Prometheus + Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + type: array + wechatConfigs: + description: List of WeChat configurations. + items: + description: WeChatConfig configures notifications via WeChat. + See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config + properties: + agentID: + type: string + apiSecret: + description: The secret's key that contains the WeChat + API key. The secret needs to be in the same namespace + as the AlertmanagerConfig object and accessible by the + Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + apiURL: + description: The WeChat API URL. + type: string + corpID: + description: The corp id for authentication. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for + the client. This is mutually exclusive with BasicAuth + and is only available starting from Alertmanager + v0.22+. + properties: + credentials: + description: The secret's key that contains the + credentials of the request + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults + to Bearer, Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually + exclusive with Authorization. If both are defined, + BasicAuth takes precedence. + properties: + password: + description: The secret in the service monitor + namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor + namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer + token to be used by the client for authentication. + The secret needs to be in the same namespace as + the AlertmanagerConfig object and accessible by + the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to + use for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert + file for the targets. + properties: + configMap: + description: ConfigMap containing data to + use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use + for the targets. + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. 
+ type: string + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key + file for the targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the + targets. + type: string + type: object + type: object + message: + description: API request data as defined by the WeChat + API. + type: string + messageType: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + toParty: + type: string + toTag: + type: string + toUser: + type: string + type: object + type: array + required: + - name + type: object + type: array + route: + description: The Alertmanager route definition for alerts matching + the resource’s namespace. If present, it will be added to the generated + Alertmanager configuration as a first-level route. + properties: + continue: + description: Boolean indicating whether an alert should continue + matching subsequent sibling nodes. It will always be overridden + to true for the first-level route by the Prometheus operator. + type: boolean + groupBy: + description: List of labels to group by. Labels must not be repeated + (unique list). Special label "..." (aggregate by all possible + labels), if provided, must be the only element in the list. + items: + type: string + type: array + groupInterval: + description: 'How long to wait before sending an updated notification. + Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + Example: "5m"' + type: string + groupWait: + description: 'How long to wait before sending the initial notification. + Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + Example: "30s"' + type: string + matchers: + description: 'List of matchers that the alert’s labels should + match. For the first level route, the operator removes any existing + equality and regexp matcher on the `namespace` label and adds + a `namespace: ` matcher.' + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager + >= v0.22.0 and takes precedence over Regex (deprecated) + if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: Whether to match on equality (false) or regular-expression + (true). Deprecated as of AlertManager >= v0.22.0 where + a user should use MatchType instead. + type: boolean + value: + description: Label value to match. 
+ type: string + required: + - name + type: object + type: array + muteTimeIntervals: + description: 'Note: this comment applies to the field definition + above but appears below otherwise it gets included in the generated + manifest. CRD schema doesn''t support self-referential types + for now (see https://github.com/kubernetes/kubernetes/issues/62872). + We have to use an alternative type to circumvent the limitation. + The downside is that the Kube API can''t validate the data beyond + the fact that it is a valid JSON representation. MuteTimeIntervals + is a list of MuteTimeInterval names that will mute this route + when matched,' + items: + type: string + type: array + receiver: + description: Name of the receiver for this route. If not empty, + it should be listed in the `receivers` field. + type: string + repeatInterval: + description: 'How long to wait before repeating the last notification. + Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` + Example: "4h"' + type: string + routes: + description: Child routes. + items: + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml b/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml new file mode 100644 index 000000000..0386b6f68 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- alertmanager_crd_0_53_1.yaml +- alertmanagerconfig_crd_0_53_1.yaml +- podmonitor_crd_0_53_1.yaml +- probe_crd_0_53_1.yaml +- prometheus_crd_0_53_1.yaml +- prometheusrule_crd_0_53_1.yaml +- servicemonitor_crd_0_53_1.yaml +- thanosruler_crd_0_53_1.yaml + + diff --git a/operators/endpointmetrics/manifests/prometheus/crd/podmonitor_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/podmonitor_crd_0_53_1.yaml new file mode 100644 index 000000000..20c244848 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/podmonitor_crd_0_53_1.yaml @@ -0,0 +1,580 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: podmonitors.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PodMonitor + listKind: PodMonitorList + plural: podmonitors + singular: podmonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PodMonitor defines monitoring for a set of pods. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Pod selection for target discovery + by Prometheus. + properties: + jobLabel: + description: The label to use to retrieve the job name from. + type: string + labelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects + are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string + type: array + type: object + podMetricsEndpoints: + description: A list of endpoints allowed as part of this PodMonitor. + items: + description: PodMetricsEndpoint defines a scrapeable endpoint of + a Kubernetes Pod serving Prometheus metrics. + properties: + authorization: + description: Authorization section for this endpoint + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over + basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' + properties: + password: + description: The secret in the service monitor namespace + that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace + that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping + targets. The secret needs to be in the same namespace as the + pod monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before + ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: The secret or configmap containing the OAuth2 + client id + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + clientSecret: + description: The secret containing the OAuth2 client secret + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + endpointParams: + additionalProperties: + type: string + description: Parameters to append to the token URL + type: object + scopes: + description: OAuth2 scopes used for the token request + items: + type: string + type: array + tokenUrl: + description: The URL to fetch the token from + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the pod port this endpoint refers to. Mutually + exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before scraping. + Prometheus Operator automatically adds relabelings for a few + standard Kubernetes fields and replaces original scrape job + name with __tmp_prometheus_job_name. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. 
+ items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Deprecated: Use ''port'' instead.' + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint. + properties: + ca: + description: Struct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + type: array + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod + onto the target. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Pod objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + targetLimit: + description: TargetLimit defines a limit on the number of scraped + targets that will be accepted. + format: int64 + type: integer + required: + - podMetricsEndpoints + - selector + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/probe_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/probe_crd_0_53_1.yaml new file mode 100644 index 000000000..ef63c8f00 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/probe_crd_0_53_1.yaml @@ -0,0 +1,610 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: probes.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Probe + listKind: ProbeList + plural: probes + singular: probe + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Probe defines monitoring for a set of static targets or ingresses. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Ingress selection for target discovery + by Prometheus. + properties: + authorization: + description: Authorization section for this endpoint + properties: + credentials: + description: The secret's key that contains the credentials of + the request + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over basic + authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' + properties: + password: + description: The secret in the service monitor namespace that + contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping targets. + The secret needs to be in the same namespace as the probe and accessible + by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + interval: + description: Interval at which targets are probed using the configured + prober. If not specified Prometheus' global scrape interval is used. 
+ type: string + jobName: + description: The job name assigned to scraped metrics by default. + type: string + labelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the label + set, being applied to samples before ingestion. It defines ``-section + of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default + is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label + values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex capture + groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label + values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. + Their content is concatenated using the configured separator + and matched against the configured regular expression for + the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in + a replace action. It is mandatory for replace actions. Regex + capture groups are available. + type: string + type: object + type: array + module: + description: 'The module to use for probing specifying how to probe + the target. Example module configuring in the blackbox exporter: + https://github.com/prometheus/blackbox_exporter/blob/master/example.yml' + type: string + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: The secret or configmap containing the OAuth2 client + id + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + clientSecret: + description: The secret containing the OAuth2 client secret + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + endpointParams: + additionalProperties: + type: string + description: Parameters to append to the token URL + type: object + scopes: + description: OAuth2 scopes used for the token request + items: + type: string + type: array + tokenUrl: + description: The URL to fetch the token from + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + prober: + description: Specification for the prober to use for probing targets. + The prober.URL parameter is required. Targets cannot be probed if + left empty. + properties: + path: + description: Path to collect metrics from. Defaults to `/probe`. + type: string + proxyUrl: + description: Optional ProxyURL. + type: string + scheme: + description: HTTP scheme to use for scraping. Defaults to `http`. + type: string + url: + description: Mandatory URL of the prober. + type: string + required: + - url + type: object + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + scrapeTimeout: + description: Timeout for scraping metrics from the Prometheus exporter. + type: string + targetLimit: + description: TargetLimit defines a limit on the number of scraped + targets that will be accepted. + format: int64 + type: integer + targets: + description: Targets defines a set of static and/or dynamically discovered + targets to be probed using the prober. + properties: + ingress: + description: Ingress defines the set of dynamically discovered + ingress objects which hosts are considered for probing. + properties: + namespaceSelector: + description: Select Ingress objects by namespace. + properties: + any: + description: Boolean describing whether all namespaces + are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string + type: array + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to samples before ingestion. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of + the label set, being applied to samples before ingestion. + It defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex + replace is performed if the regular expression matches. + Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + selector: + description: Select Ingress objects by labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: object + staticConfig: + description: 'StaticConfig defines static targets which are considers + for probing. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.' + properties: + labels: + additionalProperties: + type: string + description: Labels assigned to all metrics scraped from the + targets. + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to samples before ingestion. + More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of + the label set, being applied to samples before ingestion. + It defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. 
Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex + replace is performed if the regular expression matches. + Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + static: + description: Targets is a list of URLs to probe using the + configured prober. + items: + type: string + type: array + type: object + type: object + tlsConfig: + description: TLS configuration to use when scraping the endpoint. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_53_1.yaml new file mode 100644 index 000000000..e90e05ae7 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/prometheus_crd_0_53_1.yaml @@ -0,0 +1,8304 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: prometheuses.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Prometheus + listKind: PrometheusList + plural: prometheuses + singular: prometheus + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Prometheus + jsonPath: .spec.version + name: Version + type: string + - description: The desired replicas number of Prometheuses + jsonPath: .spec.replicas + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Prometheus defines a Prometheus deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Prometheus + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalAlertManagerConfigs: + description: 'AdditionalAlertManagerConfigs allows specifying a key + of a Secret containing additional Prometheus AlertManager configurations. + AlertManager configurations specified are appended to the configurations + generated by the Prometheus Operator. Job configurations specified + must have the form as specified in the official Prometheus documentation: + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. + As AlertManager configs are appended, the user is responsible to + make sure it is valid. Note that using this feature may expose the + possibility to break upgrades of Prometheus. 
It is advised to review + Prometheus release notes to ensure that no incompatible AlertManager + configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalAlertRelabelConfigs: + description: 'AdditionalAlertRelabelConfigs allows specifying a key + of a Secret containing additional Prometheus alert relabel configurations. + Alert relabel configurations specified are appended to the configurations + generated by the Prometheus Operator. Alert relabel configurations + specified must have the form as specified in the official Prometheus + documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + As alert relabel configs are appended, the user is responsible to + make sure it is valid. Note that using this feature may expose the + possibility to break upgrades of Prometheus. It is advised to review + Prometheus release notes to ensure that no incompatible alert relabel + configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalScrapeConfigs: + description: 'AdditionalScrapeConfigs allows specifying a key of a + Secret containing additional Prometheus scrape configurations. Scrape + configurations specified are appended to the configurations generated + by the Prometheus Operator. Job configurations specified must have + the form as specified in the official Prometheus documentation: + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. + As scrape configs are appended, the user is responsible to make + sure it is valid. Note that using this feature may expose the possibility + to break upgrades of Prometheus. It is advised to review Prometheus + release notes to ensure that no incompatible scrape configs are + going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. 
due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. 
A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. 
null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. 
+ This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. 
null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. 
+ This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alerting: + description: Define details regarding alerting. + properties: + alertmanagers: + description: AlertmanagerEndpoints Prometheus should fire alerts + against. + items: + description: AlertmanagerEndpoints defines a selection of a + single Endpoints object containing alertmanager IPs to fire + alerts against. + properties: + apiVersion: + description: Version of the Alertmanager API that Prometheus + uses to send alerts. It can be "v1" or "v2". + type: string + authorization: + description: Authorization section for this alertmanager + endpoint + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. 
Defaults to + Bearer, Basic will cause an error + type: string + type: object + bearerTokenFile: + description: BearerTokenFile to read from filesystem to + use when authenticating to Alertmanager. + type: string + name: + description: Name of Endpoints object in Namespace. + type: string + namespace: + description: Namespace of Endpoints object. + type: string + pathPrefix: + description: Prefix for the HTTP path alerts are pushed + to. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port the Alertmanager API is exposed on. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use when firing alerts. + type: string + timeout: + description: Timeout is a per-target Alertmanager timeout + when pushing alerts. + type: string + tlsConfig: + description: TLS Config to use for alertmanager connection. + properties: + ca: + description: Struct containing the CA cert to use for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for + the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the + targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file + for the targets. + properties: + configMap: + description: ConfigMap containing data to use for + the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the + targets. + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. 
+ type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for + the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - name + - namespace + - port + type: object + type: array + required: + - alertmanagers + type: object + allowOverlappingBlocks: + description: AllowOverlappingBlocks enables vertical compaction and + vertical query merge in Prometheus. This is still experimental in + Prometheus so it may change in any upcoming release. + type: boolean + apiserverConfig: + description: APIServerConfig allows specifying a host and auth methods + to access apiserver. If left empty, Prometheus is assumed to run + inside of the cluster and will discover API servers automatically + and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + properties: + authorization: + description: Authorization section for accessing apiserver + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + credentialsFile: + description: File to read a secret from, mutually exclusive + with Credentials (from SafeAuthorization) + type: string + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth allow an endpoint to authenticate over + basic authentication + properties: + password: + description: The secret in the service monitor namespace that + contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that + contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: Bearer token for accessing apiserver. + type: string + bearerTokenFile: + description: File to read bearer token for accessing apiserver. + type: string + host: + description: Host of apiserver. A valid string consisting of a + hostname or IP followed by an optional port number + type: string + tlsConfig: + description: TLS Config to use for accessing apiserver. + properties: + ca: + description: Struct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - host + type: object + arbitraryFSAccessThroughSMs: + description: ArbitraryFSAccessThroughSMs configures whether configuration + based on a service monitor can access arbitrary files on the file + system of the Prometheus container e.g. bearer token files. + properties: + deny: + type: boolean + type: object + baseImage: + description: 'Base image to use for a Prometheus deployment. Deprecated: + use ''image'' instead' + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace + as the Prometheus object, which shall be mounted into the Prometheus + Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/. + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or + modifying operator generated containers. This can be used to allow + adding an authentication proxy to a Prometheus pod or to change + the behavior of an operator generated container. Containers described + here modify an operator generated container if they share the same + name and modifications are done via a strategic merge patch. The + current container names are: `prometheus`, `config-reloader`, and + `thanos-sidecar`. Overriding containers is entirely outside the + scope of what the maintainers will support and by doing so, you + accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. 
+ items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. 
Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. 
+ format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. 
If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. 
If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. 
+ type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + disableCompaction: + description: Disable prometheus compaction. + type: boolean + enableAdminAPI: + description: 'Enable access to prometheus web admin API. Defaults + to the value of `false`. WARNING: Enabling the admin APIs enables + mutating endpoints, to delete data, shutdown Prometheus, and more. + Enabling this should be done with care and the user is advised to + add additional authentication authorization via a proxy to ensure + only clients authorized to perform these actions can do so. For + more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' + type: boolean + enableFeatures: + description: Enable access to Prometheus disabled features. By default, + no features are enabled. Enabling disabled features is entirely + outside the scope of what the maintainers will support and by doing + so, you accept that this behaviour may break at any time without + notice. For more information see https://prometheus.io/docs/prometheus/latest/disabled_features/ + items: + type: string + type: array + enforcedBodySizeLimit: + description: 'EnforcedBodySizeLimit defines the maximum size of uncompressed + response body that will be accepted by Prometheus. 
Targets responding + with a body larger than this many bytes will cause the scrape to + fail. Example: 100MB. If defined, the limit will apply to all service/pod + monitors and probes. This is an experimental feature, this behaviour + could change or be removed in the future. Only valid in Prometheus + versions 2.28.0 and newer.' + type: string + enforcedLabelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. If more than this number of labels are present post + metric-relabeling, the entire scrape will be treated as failed. + 0 means no limit. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + enforcedLabelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. If a label name is longer than this number + post metric-relabeling, the entire scrape will be treated as failed. + 0 means no limit. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + enforcedLabelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. If a label value is longer than this number + post metric-relabeling, the entire scrape will be treated as failed. + 0 means no limit. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + enforcedNamespaceLabel: + description: "EnforcedNamespaceLabel If set, a label will be added + to \n 1. all user-metrics (created by `ServiceMonitor`, `PodMonitor` + and `ProbeConfig` object) and 2. in all `PrometheusRule` objects + (except the ones excluded in `prometheusRulesExcludedFromEnforce`) + to * alerting & recording rules and * the metrics used in + their expressions (`expr`). \n Label name is this field's value. + Label value is the namespace of the created object (mentioned above)." + type: string + enforcedSampleLimit: + description: EnforcedSampleLimit defines global limit on number of + scraped samples that will be accepted. This overrides any SampleLimit + set per ServiceMonitor or/and PodMonitor. It is meant to be used + by admins to enforce the SampleLimit to keep overall number of samples/series + under the desired limit. Note that if SampleLimit is lower that + value will be taken instead. + format: int64 + type: integer + enforcedTargetLimit: + description: EnforcedTargetLimit defines a global limit on the number + of scraped targets. This overrides any TargetLimit set per ServiceMonitor + or/and PodMonitor. It is meant to be used by admins to enforce + the TargetLimit to keep the overall number of targets under the + desired limit. Note that if TargetLimit is lower, that value will + be taken instead, except if either value is zero, in which case + the non-zero value will be used. If both values are zero, no limit + is enforced. + format: int64 + type: integer + evaluationInterval: + description: 'Interval between consecutive evaluations. Default: `1m`' + type: string + externalLabels: + additionalProperties: + type: string + description: The labels to add to any time series or alerts when communicating + with external systems (federation, remote storage, Alertmanager). + type: object + externalUrl: + description: The external URL the Prometheus instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Prometheus is not served from root of a DNS name. 
+ type: string + ignoreNamespaceSelectors: + description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector + settings from the podmonitor and servicemonitor configs, and they + will only discover endpoints within their current namespace. Defaults + to false. + type: boolean + image: + description: Image if specified has precedence over baseImage, tag + and sha combinations. Specifying the version is still necessary + to ensure the Prometheus Operator knows what version of Prometheus + is being configured. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same + namespace to use for pulling prometheus and alertmanager images + from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the Prometheus configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart + of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + InitContainers described here modify an operator generated init + containers if they share the same name and modifications are done + via a strategic merge patch. The current init container name is: + `init-config-reloader`. Overriding init containers is entirely outside + the scope of what the maintainers will support and by doing so, + you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. 
More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. 
You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. 
If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. 
If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. 
If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. 
If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be a non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin + will never receive an EOF. Default is false. + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated.
+ type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Prometheus server listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for Prometheus to be configured with. + type: string + logLevel: + description: Log level for Prometheus to be configured with. + type: string + minReadySeconds: + description: Minimum number of seconds for which a newly created pod + should be ready without any of its container crashing for it to + be considered available. Defaults to 0 (pod will be considered available + as soon as it is ready) This is an alpha field and requires enabling + StatefulSetMinReadySeconds feature gate. + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + overrideHonorLabels: + description: OverrideHonorLabels if set to true overrides all user + configured honor_labels. If HonorLabels is set in ServiceMonitor + or PodMonitor to true, this overrides honor_labels to false. + type: boolean + overrideHonorTimestamps: + description: OverrideHonorTimestamps allows to globally enforce honoring + timestamps in all scrape configs. 
+ type: boolean + paused: + description: When a Prometheus deployment is paused, no actions except + for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are + propagated to the prometheus pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow a + client to request the generation of an appropriate name automatically. + Name is primarily intended for creation idempotence and configuration + definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + podMonitorNamespaceSelector: + description: Namespace's labels to match for PodMonitor discovery. + If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + podMonitorSelector: + description: '*Experimental* PodMonitors to be selected for target + discovery. *Deprecated:* if neither this nor serviceMonitorSelector + are specified, configuration is unmanaged.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. 
Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + portName: + description: Port name used for the pods and governing service. This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + probeNamespaceSelector: + description: '*Experimental* Namespaces to be selected for Probe discovery. + If nil, only check own namespace.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + probeSelector: + description: '*Experimental* Probes to be selected for target discovery.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + prometheusExternalLabelName: + description: Name of Prometheus external label used to denote Prometheus + instance name. Defaults to the value of `prometheus`. External label + will _not_ be added when value is set to empty string (`""`). + type: string + prometheusRulesExcludedFromEnforce: + description: PrometheusRulesExcludedFromEnforce - list of prometheus + rules to be excluded from enforcing of adding namespace labels. + Works only if enforcedNamespaceLabel set to true. Make sure both + ruleNamespace and ruleName are set for each pair. + items: + description: PrometheusRuleExcludeConfig enables users to configure + excluded PrometheusRule names and their namespaces to be ignored + while enforcing namespace label for alerts and metrics. + properties: + ruleName: + description: RuleName - name of excluded rule + type: string + ruleNamespace: + description: RuleNamespace - namespace of excluded rule + type: string + required: + - ruleName + - ruleNamespace + type: object + type: array + query: + description: QuerySpec defines the query command line flags when starting + Prometheus. + properties: + lookbackDelta: + description: The delta difference allowed for retrieving metrics + during expression evaluations. + type: string + maxConcurrency: + description: Number of concurrent queries that can be run at once. + format: int32 + type: integer + maxSamples: + description: Maximum number of samples a single query can load + into memory. Note that queries will fail if they would load + more samples than this into memory, so this also limits the + number of samples a query can return. + format: int32 + type: integer + timeout: + description: Maximum time a query may take before being aborted. + type: string + type: object + queryLogFile: + description: QueryLogFile specifies the file to which PromQL queries + are logged. Note that this location must be writable, and can be + persisted using an attached volume. Alternatively, the location + can be set to a stdout location such as `/dev/stdout` to log query + information to the default Prometheus log stream. This is only available + in versions of Prometheus >= 2.16.0. For more details, see the Prometheus + docs (https://prometheus.io/docs/guides/query-log/) + type: string + remoteRead: + description: If specified, the remote_read spec. This is an experimental + feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteReadSpec defines the remote_read configuration + for prometheus. + properties: + authorization: + description: Authorization section for remote read + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + credentialsFile: + description: File to read a secret from, mutually exclusive + with Credentials (from SafeAuthorization) + type: string + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the URL. + properties: + password: + description: The secret in the service monitor namespace + that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace + that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: Bearer token for remote read. + type: string + bearerTokenFile: + description: File to read bearer token for remote read. + type: string + headers: + additionalProperties: + type: string + description: Custom HTTP headers to be sent along with each + remote read request. Be aware that headers that are set by + Prometheus itself can't be overwritten. Only valid in Prometheus + versions 2.26.0 and newer. + type: object + name: + description: The name of the remote read queue, must be unique + if specified. The name is used in metrics and logging in order + to differentiate read configurations. Only valid in Prometheus + versions 2.15.0 and newer. + type: string + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: The secret or configmap containing the OAuth2 + client id + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + clientSecret: + description: The secret containing the OAuth2 client secret + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + endpointParams: + additionalProperties: + type: string + description: Parameters to append to the token URL + type: object + scopes: + description: OAuth2 scopes used for the token request + items: + type: string + type: array + tokenUrl: + description: The URL to fetch the token from + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyUrl: + description: Optional ProxyURL + type: string + readRecent: + description: Whether reads should be made for queries for time + ranges that the local storage should have complete data for. + type: boolean + remoteTimeout: + description: Timeout for requests to the remote read endpoint. + type: string + requiredMatchers: + additionalProperties: + type: string + description: An optional list of equality matchers which have + to be present in a selector to query the remote read endpoint. + type: object + tlsConfig: + description: TLS Config to use for remote read. + properties: + ca: + description: Struct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + url: + description: The URL of the endpoint to send samples to. + type: string + required: + - url + type: object + type: array + remoteWrite: + description: If specified, the remote_write spec. This is an experimental + feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteWriteSpec defines the remote_write configuration + for prometheus. + properties: + authorization: + description: Authorization section for remote write + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + credentialsFile: + description: File to read a secret from, mutually exclusive + with Credentials (from SafeAuthorization) + type: string + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: BasicAuth for the URL. + properties: + password: + description: The secret in the service monitor namespace + that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace + that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: Bearer token for remote write. + type: string + bearerTokenFile: + description: File to read bearer token for remote write. + type: string + headers: + additionalProperties: + type: string + description: Custom HTTP headers to be sent along with each + remote write request. Be aware that headers that are set by + Prometheus itself can't be overwritten. Only valid in Prometheus + versions 2.25.0 and newer. + type: object + metadataConfig: + description: MetadataConfig configures the sending of series + metadata to remote storage. + properties: + send: + description: Whether metric metadata is sent to remote storage + or not. + type: boolean + sendInterval: + description: How frequently metric metadata is sent to remote + storage. + type: string + type: object + name: + description: The name of the remote write queue, must be unique + if specified. The name is used in metrics and logging in order + to differentiate queues. Only valid in Prometheus versions + 2.15.0 and newer. + type: string + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: The secret or configmap containing the OAuth2 + client id + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + clientSecret: + description: The secret containing the OAuth2 client secret + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + endpointParams: + additionalProperties: + type: string + description: Parameters to append to the token URL + type: object + scopes: + description: OAuth2 scopes used for the token request + items: + type: string + type: array + tokenUrl: + description: The URL to fetch the token from + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyUrl: + description: Optional ProxyURL + type: string + queueConfig: + description: QueueConfig allows tuning of the remote write queue + parameters. + properties: + batchSendDeadline: + description: BatchSendDeadline is the maximum time a sample + will wait in buffer. + type: string + capacity: + description: Capacity is the number of samples to buffer + per shard before we start dropping them. + type: integer + maxBackoff: + description: MaxBackoff is the maximum retry delay. + type: string + maxRetries: + description: MaxRetries is the maximum number of times to + retry a batch on recoverable errors. + type: integer + maxSamplesPerSend: + description: MaxSamplesPerSend is the maximum number of + samples per send. + type: integer + maxShards: + description: MaxShards is the maximum number of shards, + i.e. amount of concurrency. + type: integer + minBackoff: + description: MinBackoff is the initial retry delay. Gets + doubled for every retry. + type: string + minShards: + description: MinShards is the minimum number of shards, + i.e. amount of concurrency. + type: integer + retryOnRateLimit: + description: Retry upon receiving a 429 status code from + the remote-write storage. This is an experimental feature + and might change in the future. + type: boolean + type: object + remoteTimeout: + description: Timeout for requests to the remote write endpoint. + type: string + sendExemplars: + description: Enables sending of exemplars over remote write. + Note that exemplar-storage itself must be enabled using the + enableFeature option for exemplars to be scraped in the first + place. Only valid in Prometheus versions 2.27.0 and newer. + type: boolean + sigv4: + description: Sigv4 allows configuring AWS's Signature Verification + 4 + properties: + accessKey: + description: AccessKey is the AWS API key. If blank, the + environment variable `AWS_ACCESS_KEY_ID` is used. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + profile: + description: Profile is the named AWS profile used to authenticate. + type: string + region: + description: Region is the AWS region. If blank, the region + from the default credentials chain is used. + type: string + roleArn: + description: RoleArn is the AWS role ARN to assume when authenticating. + type: string + secretKey: + description: SecretKey is the AWS API secret. If blank, + the environment variable `AWS_SECRET_ACCESS_KEY` is used. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + tlsConfig: + description: TLS Config to use for remote write. + properties: + ca: + description: Struct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + url: + description: The URL of the endpoint to send samples to. + type: string + writeRelabelConfigs: + description: The list of remote write relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + required: + - url + type: object + type: array + replicaExternalLabelName: + description: Name of Prometheus external label used to denote replica + name. Defaults to the value of `prometheus_replica`. External label + will _not_ be added when value is set to empty string (`""`). + type: string + replicas: + description: Number of replicas of each shard to deploy for a Prometheus + deployment. Number of replicas multiplied by shards is the total + number of Pods created. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + retention: + description: Time duration Prometheus shall retain data for. Default + is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` + (milliseconds seconds minutes hours days weeks years). + type: string + retentionSize: + description: 'Maximum amount of disk space used by blocks. Supported + units: B, KB, MB, GB, TB, PB, EB. Ex: `512MB`.' + type: string + routePrefix: + description: The route prefix Prometheus registers HTTP handlers for. + This is useful, if using ExternalURL and a proxy is rewriting HTTP + routes of a request, and the actual ExternalURL is still true, but + the server serves requests under a different route prefix. For example + for use with `kubectl proxy`. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for PrometheusRules discovery. + If unspecified, only the same namespace as the Prometheus object + is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + ruleSelector: + description: A selector to select which PrometheusRules to mount for + loading alerting/recording rules from. Until (excluding) Prometheus + Operator v0.24.0 Prometheus Operator will migrate any legacy rule + ConfigMaps to PrometheusRule custom resources selected by RuleSelector. + Make sure it does not match any config maps that you do not want + to be migrated. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + rules: + description: /--rules.*/ command-line arguments. + properties: + alert: + description: /--rules.alert.*/ command-line arguments + properties: + forGracePeriod: + description: Minimum duration between alert and restored 'for' + state. This is maintained only for alerts with configured + 'for' time greater than grace period. + type: string + forOutageTolerance: + description: Max time to tolerate prometheus outage for restoring + 'for' state of alert. + type: string + resendDelay: + description: Minimum amount of time to wait before resending + an alert to Alertmanager. + type: string + type: object + type: object + scrapeInterval: + description: 'Interval between consecutive scrapes. Default: `1m`' + type: string + scrapeTimeout: + description: Number of seconds to wait for target to respond before + erroring. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as + the Prometheus object, which shall be mounted into the Prometheus + Pods. The Secrets are mounted into /etc/prometheus/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. 
May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. Note + that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount + to use to run the Prometheus Pods. + type: string + serviceMonitorNamespaceSelector: + description: Namespace's labels to match for ServiceMonitor discovery. + If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + serviceMonitorSelector: + description: ServiceMonitors to be selected for target discovery. + *Deprecated:* if neither this nor podMonitorSelector are specified, + configuration is unmanaged. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. 
+ type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + sha: + description: 'SHA of Prometheus container image to be deployed. Defaults + to the value of `version`. Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. Deprecated: use ''image'' instead. The image digest + can be specified as part of the image URL.' + type: string + shards: + description: 'EXPERIMENTAL: Number of shards to distribute targets + onto. Number of replicas multiplied by shards is the total number + of Pods created. Note that scaling down shards will not reshard + data onto remaining instances, it must be manually moved. Increasing + shards will not reshard data either but it will continue to be available + from the same instances. To query globally use Thanos sidecar and + Thanos querier or remote write data to a central location. Sharding + is done on the content of the `__address__` target meta-label.' + format: int32 + type: integer + storage: + description: Storage spec to specify how storage shall be used. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be disabled by default + in a future release, this option will become unnecessary. DisableMountSubPath + allows to remove any subPath usage in volume mounts.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. + More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: 'EphemeralVolumeSource to be used by the Prometheus + StatefulSets. 
This is a beta field in k8s 1.21, for lower versions, + starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC will + be deleted together with the pod. The name of the PVC will + be `<pod name>-<volume name>` where `<volume name>` is the + name from the `PodSpec.Volumes` array entry. Pod validation + will reject the pod if the concatenated name is not valid + for a PVC (for example, too long). \n An existing PVC with + that name that is not owned by the pod will *not* be used + for the pod to avoid using an unrelated volume by mistake. + Starting the pod is then blocked until the unrelated PVC + is removed. If such a pre-created PVC is meant to be used + by the pod, the PVC has to be updated with an owner reference + to the pod once the pod exists. Normally this should not + be necessary, but it may be useful when manually reconstructing + a broken cluster. \n This field is read-only and no changes + will be made by Kubernetes to the PVC after it has been + created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will + be copied into the PVC when creating it. No other fields + are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified data + source. If the AnyVolumeDataSource feature gate + is enabled, this field will always have the same + contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API + group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner.
This field will replace the functionality + of the DataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the other + is non-empty. There are two important differences + between DataSource and DataSourceRef: * While DataSource + only allows two specific types of objects, DataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. 
If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by + the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is + implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST + resource this object represents. Servers may infer this + from the endpoint the client submits requests to. Cannot + be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to + an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value + map stored with a resource that may be set by external + tools to store and retrieve arbitrary metadata. They + are not queryable and should be preserved when modifying + objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. + Is required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be + updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of + a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on the + contents of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will always have + the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API group + (non core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. + This field will replace the functionality of the DataSource + field and as such if both fields are non-empty, they + must have the same value. For backwards compatibility, + both fields (DataSource and DataSourceRef) will be set + to the same value automatically if one of them is empty + and the other is non-empty. There are two important + differences between DataSource and DataSourceRef: * + While DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as well + as PersistentVolumeClaim objects. * While DataSource + ignores disallowed values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed value + is specified. (Alpha) Using this field requires the + AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the status + field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: The storage resource within AllocatedResources + tracks the capacity allocated to a PVC. It may be larger + than the actual capacity when a volume expansion operation + is requested. 
For storage quota, the larger value from + allocatedResources and PVC.spec.resources is used. If + allocatedResources is not set, PVC.spec.resources alone + is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower + than the requested capacity. This is an alpha field + and requires enabling RecoverVolumeExpansionFailure + feature. + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails + details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is + being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType + is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + resizeStatus: + description: ResizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. + type: string + type: object + type: object + type: object + tag: + description: 'Tag of Prometheus container image to be deployed. Defaults + to the value of `version`. Version is ignored if Tag is set. Deprecated: + use ''image'' instead. The image tag can be specified as part of + the image URL.' + type: string + thanos: + description: "Thanos configuration allows configuring various aspects + of a Prometheus server in a Thanos environment. \n This section + is experimental, it may change significantly without deprecation + notice in any release. \n This is experimental and may change significantly + without backward compatibility in any release." + properties: + baseImage: + description: 'Thanos base image if other than default. Deprecated: + use ''image'' instead' + type: string + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from + which Thanos Querier reads recorded rule data. Note: Currently + only the CAFile, CertFile, and KeyFile fields are supported. + Maps to the ''--grpc-server-tls-*'' CLI args.' + properties: + ca: + description: Struct containing the CA cert to use for the + targets. 
+ properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + image: + description: Image if specified has precedence over baseImage, + tag and sha combinations. Specifying the version is still necessary + to ensure the Prometheus Operator knows what version of Thanos + is being configured. 
+ type: string + listenLocal: + description: ListenLocal makes the Thanos sidecar listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: LogFormat for Thanos sidecar to be configured with. + type: string + logLevel: + description: LogLevel for Thanos sidecar to be configured with. + type: string + minTime: + description: MinTime for Thanos sidecar to be configured with. + Option can be a constant time in RFC3339 format or time duration + relative to current time, such as -1d or 2h45m. Valid duration + units are ms, s, m, h, d, w, y. + type: string + objectStorageConfig: + description: ObjectStorageConfig configures object storage in + Thanos. Alternative to ObjectStorageConfigFile, and lower order + priority. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + objectStorageConfigFile: + description: ObjectStorageConfigFile specifies the path of the + object storage configuration file. When used alongside with + ObjectStorageConfig, ObjectStorageConfigFile takes precedence. + type: string + readyTimeout: + description: ReadyTimeout is the maximum time Thanos sidecar will + wait for Prometheus to start. Eg 10m + type: string + resources: + description: Resources defines the resource requirements for the + Thanos sidecar. If not provided, no requests/limits will be + set + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + sha: + description: 'SHA of Thanos container image to be deployed. Defaults + to the value of `version`. Similar to a tag, but the SHA explicitly + deploys an immutable container image. Version and Tag are ignored + if SHA is set. Deprecated: use ''image'' instead. The image + digest can be specified as part of the image URL.' + type: string + tag: + description: 'Tag of Thanos sidecar container image to be deployed. + Defaults to the value of `version`. Version is ignored if Tag + is set. Deprecated: use ''image'' instead. The image tag can + be specified as part of the image URL.' + type: string + tracingConfig: + description: TracingConfig configures tracing in Thanos. 
This + is an experimental feature, it may change in any upcoming release + in a breaking way. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + tracingConfigFile: + description: TracingConfigFile specifies the path of the tracing configuration + file. When used alongside with TracingConfig, TracingConfigFile + takes precedence. + type: string + version: + description: Version describes the version of Thanos to use. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified + will be appended to other VolumeMounts in the thanos-sidecar + container. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict).
Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + For example, in a 3-zone cluster, MaxSkew is set to 1, and + pods with the same labelSelector spread as 1/1/0: | zone1 + | zone2 | zone3 | | P | P | | - if MaxSkew is + 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + scheduling it onto zone1(zone2) would make the ActualSkew(2-0) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number of pods into + each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it.
- ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + version: + description: Version of Prometheus to be deployed. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts + on the output StatefulSet definition. VolumeMounts specified will + be appended to other VolumeMounts in the prometheus container, that + are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume should + be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly to + SubPath but environment variable references $(VAR_NAME) are + expanded using the container's environment. Defaults to "" + (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on + the output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' 
+ format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). 
+ \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have the + same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to + populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) or + a PersistentVolumeClaim object. 
When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + DataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While + DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as + well as PersistentVolumeClaim objects. * While + DataSource ignores disallowed values (dropping + them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. 
+ type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. 
More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). 
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set. 
+ format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. 
The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. 
The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. 
More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. 
If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + walCompression: + description: Enable compression of the write-ahead log using Snappy. + This flag is only available in versions of Prometheus >= 2.11.0. + type: boolean + web: + description: WebSpec defines the web command line flags when starting + Prometheus. + properties: + pageTitle: + description: The prometheus web page title + type: string + tlsConfig: + description: WebTLSConfig defines the TLS parameters for HTTPS. 
+ properties: + cert: + description: Contains the TLS certificate for the server. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + cipherSuites: + description: 'List of supported cipher suites for TLS versions + up to TLS 1.2. If empty, Go default cipher suites are used. + Available cipher suites are documented in the go documentation: + https://golang.org/pkg/crypto/tls/#pkg-constants' + items: + type: string + type: array + client_ca: + description: Contains the CA certificate for client certificate + authentication to the server. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + clientAuthType: + description: 'Server policy for client authentication. Maps + to ClientAuth Policies. For more detail on clientAuth options: + https://golang.org/pkg/crypto/tls/#ClientAuthType' + type: string + curvePreferences: + description: 'Elliptic curves that will be used in an ECDHE + handshake, in preference order. Available curves are documented + in the go documentation: https://golang.org/pkg/crypto/tls/#CurveID' + items: + type: string + type: array + keySecret: + description: Secret containing the TLS key for the server. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + maxVersion: + description: Maximum TLS version that is acceptable. Defaults + to TLS13. + type: string + minVersion: + description: Minimum TLS version that is acceptable. Defaults + to TLS12. + type: string + preferServerCipherSuites: + description: Controls whether the server selects the client's + most preferred cipher suite, or the server's most preferred + cipher suite. If true then the server's preference, as expressed + in the order of elements in cipherSuites, is used. + type: boolean + required: + - cert + - keySecret + type: object + type: object + type: object + status: + description: 'Most recent observed status of the Prometheus cluster. Read-only. + Not included when requesting from the apiserver, only from the Prometheus + Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this Prometheus deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this + Prometheus deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Prometheus + deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this + Prometheus deployment that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/prometheusrule_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/prometheusrule_crd_0_53_1.yaml new file mode 100644 index 000000000..1dd024388 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/prometheusrule_crd_0_53_1.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: prometheusrules.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PrometheusRule + listKind: PrometheusRuleList + plural: prometheusrules + singular: prometheusrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PrometheusRule defines recording and alerting rules for a Prometheus + instance + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired alerting rule definitions for Prometheus. + properties: + groups: + description: Content of Prometheus rule file + items: + description: 'RuleGroup is a list of sequentially evaluated recording + and alerting rules. Note: PartialResponseStrategy is only used + by ThanosRuler and will be ignored by Prometheus instances. Valid + values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response' + properties: + interval: + type: string + name: + type: string + partial_response_strategy: + type: string + rules: + items: + description: 'Rule describes an alerting or recording rule + See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) + or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) + rule' + properties: + alert: + type: string + annotations: + additionalProperties: + type: string + type: object + expr: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + for: + type: string + labels: + additionalProperties: + type: string + type: object + record: + type: string + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/servicemonitor_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/servicemonitor_crd_0_53_1.yaml new file mode 100644 index 000000000..26b3003ab --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/servicemonitor_crd_0_53_1.yaml @@ -0,0 +1,607 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: servicemonitors.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: ServiceMonitor + listKind: ServiceMonitorList + plural: servicemonitors + singular: servicemonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceMonitor defines monitoring for a set of services. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Service selection for target discovery + by Prometheus. + properties: + endpoints: + description: A list of endpoints allowed as part of this ServiceMonitor. + items: + description: Endpoint defines a scrapeable endpoint serving Prometheus + metrics. + properties: + authorization: + description: Authorization section for this endpoint + properties: + credentials: + description: The secret's key that contains the credentials + of the request + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: + description: Set the authentication type. Defaults to Bearer, + Basic will cause an error + type: string + type: object + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over + basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' + properties: + password: + description: The secret in the service monitor namespace + that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace + that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenFile: + description: File to read bearer token for scraping targets. + type: string + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping + targets. The secret needs to be in the same namespace as the + service monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + honorLabels: + description: HonorLabels chooses the metric's labels on collisions + with target labels. 
+ type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects + the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before + ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions + 2.27.0 and newer. + properties: + clientId: + description: The secret or configmap containing the OAuth2 + client id + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + clientSecret: + description: The secret containing the OAuth2 client secret + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + endpointParams: + additionalProperties: + type: string + description: Parameters to append to the token URL + type: object + scopes: + description: OAuth2 scopes used for the token request + items: + type: string + type: array + tokenUrl: + description: The URL to fetch the token from + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the service port this endpoint refers to. + Mutually exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes + to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before scraping. + Prometheus Operator automatically adds relabelings for a few + standard Kubernetes fields and replaces original scrape job + name with __tmp_prometheus_job_name. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the + label set, being applied to samples before ingestion. It + defines ``-section of Prometheus + configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. + Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source + label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted + value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace + is performed if the regular expression matches. Regex + capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source + label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing + labels. Their content is concatenated using the configured + separator and matched against the configured regular + expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written + in a replace action. It is mandatory for replace actions. + Regex capture groups are available. + type: string + type: object + type: array + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: Name or number of the target port of the Pod behind + the Service, the port must be specified with container port + property. Mutually exclusive with port. + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint + properties: + ca: + description: Struct containing the CA cert to use for the + targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. 
+ properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container + to use for the targets. + type: string + cert: + description: Struct containing the client cert file for + the targets. + properties: + configMap: + description: ConfigMap containing data to use for the + targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus + container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus + container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the + targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + type: array + jobLabel: + description: "Chooses the label of the Kubernetes `Endpoints`. Its + value will be used for the `job`-label's value of the created metrics. + \n Default & fallback value: the name of the respective Kubernetes + `Endpoint`." 
+ type: string + labelLimit: + description: Per-scrape limit on number of labels that will be accepted + for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be + accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will + be accepted for a sample. Only valid in Prometheus versions 2.27.0 + and newer. + format: int64 + type: integer + namespaceSelector: + description: Selector to select which namespaces the Kubernetes Endpoints + objects are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected + in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string + type: array + type: object + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes `Pod` + onto the created metrics. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped + samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Endpoints objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + targetLabels: + description: TargetLabels transfers labels from the Kubernetes `Service` + onto the created metrics. All labels set in `selector.matchLabels` + are automatically transferred. + items: + type: string + type: array + targetLimit: + description: TargetLimit defines a limit on the number of scraped + targets that will be accepted. 
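For reference, the endpoints/selector schema laid out above is the prometheus-operator ServiceMonitor shape; a minimal custom resource that this schema would validate could look like the sketch below. All names, namespaces, labels, and relabeling values are illustrative assumptions and are not taken from this patch.

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app                  # illustrative name/namespace
  namespace: monitoring
spec:
  jobLabel: app.kubernetes.io/name   # label on the Kubernetes Endpoints used for the job label
  selector:
    matchLabels:
      app: example-app               # must match the labels on the target Service
  namespaceSelector:
    matchNames:
      - example-namespace
  endpoints:
    - port: metrics                  # name of the Service port to scrape
      path: /metrics
      interval: 30s
      scheme: https
      tlsConfig:
        insecureSkipVerify: true     # illustrative; a real setup would reference a CA via ca.configMap or ca.secret
      metricRelabelings:
        - sourceLabels: [__name__]
          regex: go_gc_.*            # drop Go GC metrics before ingestion
          action: drop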
+ format: int64 + type: integer + required: + - endpoints + - selector + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/crd/thanosruler_crd_0_53_1.yaml b/operators/endpointmetrics/manifests/prometheus/crd/thanosruler_crd_0_53_1.yaml new file mode 100644 index 000000000..d514f1484 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/crd/thanosruler_crd_0_53_1.yaml @@ -0,0 +1,6204 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: thanosrulers.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: ThanosRuler + listKind: ThanosRulerList + plural: thanosrulers + singular: thanosruler + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ThanosRuler defines a ThanosRuler deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the ThanosRuler + cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. 
This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. 
When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces field. + null selector and null or empty namespaces list + means "this pod's namespace". An empty selector + ({}) matches all namespaces. This field is beta-level + and is only honored when PodAffinityNamespaceSelector + feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. The + term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces list + and null namespaceSelector means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. 
When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied to the + union of the namespaces selected by this field and + the ones listed in the namespaces field. null selector + and null or empty namespaces list means "this pod's + namespace". An empty selector ({}) matches all namespaces. + This field is beta-level and is only honored when + PodAffinityNamespaceSelector feature is enabled. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies a static list of namespace + names that the term applies to. The term is applied + to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. null or + empty namespaces list and null namespaceSelector means + "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alertDropLabels: + description: AlertDropLabels configure the label names which should + be dropped in ThanosRuler alerts. The replica label `thanos_ruler_replica` + will always be dropped in alerts. + items: + type: string + type: array + alertQueryUrl: + description: The external Query URL the Thanos Ruler will set in the + 'Source' field of all alerts. Maps to the '--alert.query-url' CLI + arg. + type: string + alertRelabelConfigFile: + description: AlertRelabelConfigFile specifies the path of the alert + relabeling configuration file. When used alongside with AlertRelabelConfigs, + alertRelabelConfigFile takes precedence. + type: string + alertRelabelConfigs: + description: 'AlertRelabelConfigs configures alert relabeling in ThanosRuler. + Alert relabel configurations must have the form as specified in + the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs + Alternative to AlertRelabelConfigFile, and lower order priority.' + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + alertmanagersConfig: + description: Define configuration for connecting to alertmanager. Only + available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config` + arg. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + alertmanagersUrl: + description: 'Define URLs to send alerts to Alertmanager. For Thanos + v0.10.0 and higher, AlertManagersConfig should be used instead. Note: + this field will be ignored if AlertManagersConfig is specified. + Maps to the `alertmanagers.url` arg.' 
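The alert* and affinity fields described above are the ones most commonly set on this ThanosRuler resource; a sketch of how they combine follows. The metadata, the Alertmanager URL, and the pod labels are illustrative assumptions, and a working ThanosRuler also needs query and rule configuration defined elsewhere in this CRD.

apiVersion: monitoring.coreos.com/v1
kind: ThanosRuler
metadata:
  name: example-ruler                # illustrative name/namespace
  namespace: monitoring
spec:
  alertmanagersUrl:
    - http://alertmanager-operated.monitoring.svc:9093   # static URL; alertmanagersConfig takes precedence on Thanos v0.10.0+
  alertDropLabels:
    - replica                        # thanos_ruler_replica is always dropped regardless
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                app.kubernetes.io/name: thanos-ruler     # illustrative pod label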
+ items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or + modifying operator generated containers. This can be used to allow + adding an authentication proxy to a ThanosRuler pod or to change + the behavior of an operator generated container. Containers described + here modify an operator generated container if they share the same + name and modifications are done via a strategic merge patch. The + current container names are: `thanos-ruler` and `config-reloader`. + Overriding containers is entirely outside the scope of what the + maintainers will support and by doing so, you accept that this behaviour + may break at any time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
+ The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. 
The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. 
+ properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. 
Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label + of origin for each alert and metric that is user created. The label + value will always be the namespace of the object that is being created. + type: string + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalPrefix: + description: The external URL the Thanos Ruler instances will be available + under. This is necessary to generate correct URLs. This is necessary + if Thanos Ruler is not served from root of a DNS name. + type: string + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from + which Thanos Querier reads recorded rule data. Note: Currently only + the CAFile, CertFile, and KeyFile fields are supported. Maps to + the ''--grpc-server-tls-*'' CLI args.' + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to + use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. 
+ properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container + for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container + for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + image: + description: Thanos container image URL. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same + namespace to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to + let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod + definition. Those can be used to e.g. fetch secrets for injection + into the ThanosRuler configuration from external sources. Any errors + during the execution of an initContainer will lead to a restart + of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + Using initContainers for any use case other then secret fetching + is entirely outside the scope of what the maintainers will support + and by doing so, you accept that this behaviour may break at any + time without notice.' + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. 
The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable + exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: + https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) + syntax: i.e. "$$(VAR_NAME)" will produce the string + literal "$(VAR_NAME)". Escaped references will never + be expanded, regardless of whether the variable exists + or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. 
+ type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period + countdown begins before the PreStop hook is executed. + Regardless of the outcome of the handler, the container + will eventually terminate within the Pod''s termination + grace period (unless delayed by finalizers). Other management + of the container blocks until the hook completes or until + the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported + as a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. 
+ This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More + info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. Note that this field cannot be + set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. Note that this field + cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. 
Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. + Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components + that enable the WindowsHostProcessContainers feature + flag. Setting this field without the feature flag + will result in errors when validating the Pod. All + of a Pod's containers must have the same effective + HostProcess value (it is not allowed to have a mix + of HostProcess containers and non-HostProcess containers). In + addition, if HostProcess is true then HostNetwork + must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is an alpha field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to + place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior + is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs + to terminate gracefully upon probe failure. The grace + period is the duration in seconds after the processes + running in the pod are sent a termination signal and the + time when the processes are forcibly halted with a kill + signal. Set this value longer than the expected cleanup + time for your process. If this value is nil, the pod's + terminationGracePeriodSeconds will be used. Otherwise, + this value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates + stop immediately via the kill signal (no opportunity to + shut down). This is a beta field and requires enabling + ProbeTerminationGracePeriod feature gate. Minimum value + is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. 
If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels configure the external label pairs to ThanosRuler. + A default replica label `thanos_ruler_replica` will be always added as + a label with the value of the pod's name and it will be dropped + in the alerts. + type: object + listenLocal: + description: ListenLocal makes the Thanos ruler listen on loopback, + so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for ThanosRuler to be configured with. + type: string + logLevel: + description: Log level for ThanosRuler to be configured with. + type: string + minReadySeconds: + description: Minimum number of seconds for which a newly created pod + should be ready without any of its container crashing for it to + be considered available. Defaults to 0 (pod will be considered available + as soon as it is ready) This is an alpha field and requires enabling + StatefulSetMinReadySeconds feature gate. + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. + Alternative to ObjectStorageConfigFile, and lower order priority. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + objectStorageConfigFile: + description: ObjectStorageConfigFile specifies the path of the object + storage configuration file. When used alongside with ObjectStorageConfig, + ObjectStorageConfigFile takes precedence. + type: string + paused: + description: When a ThanosRuler deployment is paused, no actions except + for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata contains Labels and Annotations gets propagated + to the thanos ruler pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store and + retrieve arbitrary metadata. They are not queryable and should + be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to + organize and categorize (scope and select) objects. May match + selectors of replication controllers and services. More info: + http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow a + client to request the generation of an appropriate name automatically. + Name is primarily intended for creation idempotence and configuration + definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + portName: + description: Port name used for the pods and governing service. This + defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + prometheusRulesExcludedFromEnforce: + description: PrometheusRulesExcludedFromEnforce - list of Prometheus + rules to be excluded from enforcing of adding namespace labels. + Works only if enforcedNamespaceLabel set to true. Make sure both + ruleNamespace and ruleName are set for each pair + items: + description: PrometheusRuleExcludeConfig enables users to configure + excluded PrometheusRule names and their namespaces to be ignored + while enforcing namespace label for alerts and metrics. + properties: + ruleName: + description: RuleName - name of excluded rule + type: string + ruleNamespace: + description: RuleNamespace - namespace of excluded rule + type: string + required: + - ruleName + - ruleNamespace + type: object + type: array + queryConfig: + description: Define configuration for connecting to thanos query instances. + If this is defined, the QueryEndpoints field will be ignored. Maps + to the `query.config` CLI argument. Only available with thanos v0.11.0 + and higher. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + queryEndpoints: + description: QueryEndpoints defines Thanos querier endpoints from + which to query metrics. Maps to the --query flag of thanos ruler. + items: + type: string + type: array + replicas: + description: Number of thanos ruler instances to deploy. + format: int32 + type: integer + resources: + description: Resources defines the resource requirements for single + Pods. If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + retention: + description: Time duration ThanosRuler shall retain data for. Default + is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` + (milliseconds seconds minutes hours days weeks years). + type: string + routePrefix: + description: The route prefix ThanosRuler registers HTTP handlers + for. 
This allows thanos UI to be served on a sub-path. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for Rules discovery. If unspecified, + only the same namespace as the ThanosRuler object is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + ruleSelector: + description: A label selector to select which PrometheusRules to mount + for alerting and recording. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + securityContext: + description: SecurityContext holds pod-level security attributes and + common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all + containers in a pod. Some volume types allow the Kubelet to + change the ownership of that volume to be owned by the pod: + \n 1. The owning GID will be the FSGroup 2. The setgid bit is + set (new files created in the volume will be owned by FSGroup) + 3. 
The permission bits are OR'd with rw-rw---- \n If unset, + the Kubelet will not modify the ownership and permissions of + any volume. Note that this field cannot be set when spec.os.name + is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing + ownership and permission of the volume before being exposed + inside Pod. This field will only apply to volume types which + support fsGroup based ownership(and permissions). It will have + no effect on ephemeral volume types such as: secret, configmaps + and emptydir. Valid values are "OnRootMismatch" and "Always". + If not specified, "Always" is used. Note that this field cannot + be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. + Uses runtime default if unset. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail to start + the container if it does. If unset or false, no such validation + will be performed. May also be set in SecurityContext. If set + in both SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. Note that this field cannot + be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, the value + specified in SecurityContext takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to + the container. + type: string + role: + description: Role is a SELinux role label that applies to + the container. + type: string + type: + description: Type is a SELinux type label that applies to + the container. + type: string + user: + description: User is a SELinux user label that applies to + the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this + pod. Note that this field cannot be set when spec.os.name is + windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must be + preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. 
Valid options are: \n Localhost - a profile + defined in a file on the node should be used. RuntimeDefault + - the container runtime default profile should be used. + Unconfined - no profile should be applied." + type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run + in each container, in addition to the container's primary GID. If + unspecified, no groups will be added to any container. Note + that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for + the pod. Pods with unsupported sysctls (by the container runtime) + might fail to launch. Note that this field cannot be set when + spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named by + the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA + credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is alpha-level + and will only be honored by components that enable the WindowsHostProcessContainers + feature flag. Setting this field without the feature flag + will result in errors when validating the Pod. All of a + Pod's containers must have the same effective HostProcess + value (it is not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in PodSecurityContext. + If set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount + to use to run the Thanos Ruler Pods. + type: string + storage: + description: Storage spec to specify how storage shall be used. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be disabled by default + in a future release, this option will become unnecessary. DisableMountSubPath + allows to remove any subPath usage in volume mounts.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus + StatefulSets. If specified, used in place of any volumeClaimTemplate. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this + EmptyDir volume. The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir would + be the minimum value between the SizeLimit specified here + and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: 'EphemeralVolumeSource to be used by the Prometheus + StatefulSets. This is a beta field in k8s 1.21, for lower versions, + starting with k8s 1.19, it requires enabling the GenericEphemeralVolume + feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC will + be deleted together with the pod. The name of the PVC will + be `<pod name>-<volume name>` where `<volume name>` is the + name from the `PodSpec.Volumes` array entry. Pod validation + will reject the pod if the concatenated name is not valid + for a PVC (for example, too long). \n An existing PVC with + that name that is not owned by the pod will *not* be used + for the pod to avoid using an unrelated volume by mistake. + Starting the pod is then blocked until the unrelated PVC + is removed. If such a pre-created PVC is meant to be used + by the pod, the PVC has to be updated with an owner reference + to the pod once the pod exists. Normally this should not + be necessary, but it may be useful when manually reconstructing + a broken cluster. \n This field is read-only and no changes + will be made by Kubernetes to the PVC after it has been + created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will + be copied into the PVC when creating it. No other fields + are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified data + source. 
If the AnyVolumeDataSource feature gate + is enabled, this field will always have the same + contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API + group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the DataSource field and as such if both fields + are non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the other + is non-empty. There are two important differences + between DataSource and DataSourceRef: * While DataSource + only allows two specific types of objects, DataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. 
If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by + the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is + implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST + resource this object represents. Servers may infer this + from the endpoint the client submits requests to. Cannot + be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to + an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value + map stored with a resource that may be set by external + tools to store and retrieve arbitrary metadata. They + are not queryable and should be preserved when modifying + objects. 
More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be + used to organize and categorize (scope and select) objects. + May match selectors of replication controllers and services. + More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. + Is required when creating resources, although some resources + may allow a client to request the generation of an appropriate + name automatically. Name is primarily intended for creation + idempotence and configuration definition. Cannot be + updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of + a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on the + contents of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will always have + the same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API group + (non core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only + succeed if the type of the specified object matches + some installed volume populator or dynamic provisioner. + This field will replace the functionality of the DataSource + field and as such if both fields are non-empty, they + must have the same value. For backwards compatibility, + both fields (DataSource and DataSourceRef) will be set + to the same value automatically if one of them is empty + and the other is non-empty. There are two important + differences between DataSource and DataSourceRef: * + While DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as well + as PersistentVolumeClaim objects. * While DataSource + ignores disallowed values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed value + is specified. (Alpha) Using this field requires the + AnyVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. 
If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the status + field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is omitted + for a container, it defaults to Limits if that is + explicitly specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider for + binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values + array must be non-empty. If the operator is + Exists or DoesNotExist, the values array must + be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is + required by the claim. Value of Filesystem is implied + when not included in claim spec. 
+ type: string + volumeName: + description: VolumeName is the binding reference to the + PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status + of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes + the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: The storage resource within AllocatedResources + tracks the capacity allocated to a PVC. It may be larger + than the actual capacity when a volume expansion operation + is requested. For storage quota, the larger value from + allocatedResources and PVC.spec.resources is used. If + allocatedResources is not set, PVC.spec.resources alone + is used for quota calculation. If a volume expansion + capacity request is lowered, allocatedResources is only + lowered if there are no expansion operations in progress + and if the actual volume capacity is equal or lower + than the requested capacity. This is an alpha field + and requires enabling RecoverVolumeExpansionFailure + feature. + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying + volume. + type: object + conditions: + description: Current Condition of persistent volume claim. + If underlying persistent volume is being resized then + the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains + details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned + from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details + about last transition. + type: string + reason: + description: Unique, this should be a short, machine + understandable string that gives the reason for + condition's last transition. If it reports "ResizeStarted" + that means the underlying persistent volume is + being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType + is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + resizeStatus: + description: ResizeStatus stores status of resize operation. + ResizeStatus is not set by default but when expansion + is complete resizeStatus is set to empty string by resize + controller or kubelet. This is an alpha field and requires + enabling RecoverVolumeExpansionFailure feature. 
+ type: string + type: object + type: object + type: object + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. 
When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + For example, in a 3-zone cluster, MaxSkew is set to 1, and + pods with the same labelSelector spread as 1/1/0: | zone1 + | zone2 | zone3 | | P | P | | - if MaxSkew is + 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + scheduling it onto zone1(zone2) would make the ActualSkew(2-0) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each <key, value> + as a "bucket", and try to put balanced number of pods into + each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for + an incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology. For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + tracingConfig: + description: TracingConfig configures tracing in Thanos. This is an + experimental feature, it may change in any upcoming release in a + breaking way. + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + volumes: + description: Volumes allows configuration of additional volumes on + the output StatefulSet definition. Volumes specified will be appended + to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. 
Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. 
+ type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a Optional: mode bits used to set + permissions on created files by default. Must be an octal + value between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults to + 0644. Directories within the path are not affected by + this setting. This might be in conflict with other options + that affect the file mode, like fsGroup, and the result + can be other mode bits set.' 
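For reference, a small sketch of how the configMap volume source documented above could be attached through this CRD's volumes field, which is appended to the volumes the operator generates; the ConfigMap name thanos-ruler-extra-config and the mount name are placeholders:

spec:
  volumes:
    - name: extra-config
      configMap:
        name: thanos-ruler-extra-config   # placeholder ConfigMap in the same namespace
        defaultMode: 420                  # 0644 written in decimal, since JSON mode bits must be decimal
        optional: true                    # pod start does not fail if the ConfigMap is absent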
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set permissions + on this file, must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver. 
The volume's lifecycle is tied + to the pod that defines it - it will be created before the + pod starts, and deleted when the pod is removed. \n Use this + if: a) the volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or capacity + \ tracking are needed, c) the storage driver is specified + through a storage class, and d) the storage driver supports + dynamic volume provisioning through a PersistentVolumeClaim + (see EphemeralVolumeSource for more information on the + connection between this volume type and PersistentVolumeClaim). + \n Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. \n A + pod can use both types of ephemeral volumes and persistent + volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to + provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the PVC + will be deleted together with the pod. The name of the + PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. + Pod validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). \n + An existing PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC + is meant to be used by the pod, the PVC has to updated + with an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may be useful + when manually reconstructing a broken cluster. \n This + field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, must + not be nil." + properties: + metadata: + description: May contain labels and annotations that + will be copied into the PVC when creating it. No other + fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the PVC + that gets created from this template. The same fields + as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the + provisioner or an external controller can support + the specified data source, it will create a new + volume based on the contents of the specified + data source. If the AnyVolumeDataSource feature + gate is enabled, this field will always have the + same contents as the DataSourceRef field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. 
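As a sketch of the generic ephemeral volume described above, the following entry would have the generated StatefulSet create a PVC that is owned by, and deleted with, each pod; the storage class gp2 and the 10Gi request are assumptions:

volumes:
  - name: scratch
    ephemeral:
      volumeClaimTemplate:
        spec:                             # copied unchanged into the PVC created for each pod
          accessModes: ["ReadWriteOnce"]
          storageClassName: gp2           # assumed StorageClass backed by a dynamic provisioner
          resources:
            requests:
              storage: 10Gi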
+ type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to + populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) or + a PersistentVolumeClaim object. When this field + is specified, volume binding will only succeed + if the type of the specified object matches some + installed volume populator or dynamic provisioner. + This field will replace the functionality of the + DataSource field and as such if both fields are + non-empty, they must have the same value. For + backwards compatibility, both fields (DataSource + and DataSourceRef) will be set to the same value + automatically if one of them is empty and the + other is non-empty. There are two important differences + between DataSource and DataSourceRef: * While + DataSource only allows two specific types of objects, + DataSourceRef allows any non-core object, as + well as PersistentVolumeClaim objects. * While + DataSource ignores disallowed values (dropping + them), DataSourceRef preserves all values, and + generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API + group. For any other third-party types, APIGroup + is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than previous + value but must still be higher than capacity recorded + in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. If Requests + is omitted for a container, it defaults to + Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem + is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). 
+ ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. 
+ type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. 
+ type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. 
Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. 
This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to + set permissions on this file. Must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. 
+ type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value between + 0000 and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. 
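For the secret volume source, a sketch that projects only a selected key to a chosen relative path inside the volume; the Secret name thanos-ruler-objstore and the key/path pair are illustrative:

volumes:
  - name: objstore-config
    secret:
      secretName: thanos-ruler-objstore   # hypothetical Secret in the pod's namespace
      items:
        - key: objstore.yaml              # only this key is projected
          path: config/objstore.yaml      # to this relative path inside the mount
      defaultMode: 420                    # 0644 in decimal
      optional: false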
+ type: string + mode: + description: 'Optional: mode bits used to set permissions + on this file. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the ThanosRuler cluster. + Read-only. 
Not included when requesting from the apiserver, only from + the ThanosRuler Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) + targeted by this ThanosRuler deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed + objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this + ThanosRuler deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this ThanosRuler + deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this + ThanosRuler deployment that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/operators/endpointmetrics/manifests/prometheus/kube-prometheus-rules.yaml b/operators/endpointmetrics/manifests/prometheus/kube-prometheus-rules.yaml deleted file mode 100644 index 0a89e4a4b..000000000 --- a/operators/endpointmetrics/manifests/prometheus/kube-prometheus-rules.yaml +++ /dev/null @@ -1,32 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-prometheus-rules - namespace: open-cluster-management-addon-observability -data: - kube-prometheus-rules.yaml: | - groups: - - name: kube-prometheus-node-recording.rules - rules: - - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) - BY (instance) - record: instance:node_cpu:rate:sum - - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) - record: instance:node_network_receive_bytes:rate:sum - - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) - record: instance:node_network_transmit_bytes:rate:sum - - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) - WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) - BY (instance, cpu)) BY (instance) - record: instance:node_cpu:ratio - - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) - record: cluster:node_cpu:sum_rate5m - - expr: cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) - BY (instance, cpu)) - record: cluster:node_cpu:ratio - - name: kube-prometheus-general.rules - rules: - - expr: count without(instance, pod, node) (up == 1) - record: count:up1 - - expr: count without(instance, pod, node) (up == 0) - record: count:up0 \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-alertingrules.yaml b/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-alertingrules.yaml deleted file mode 100644 index 46eeeb9f5..000000000 --- a/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-alertingrules.yaml +++ /dev/null @@ -1,962 +0,0 @@ ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: kubernetes-monitoring-alertingrules - namespace: 
open-cluster-management-addon-observability -data: - kubernetes-monitoring-alertingrules.yaml: | - groups: - - name: general-rules - rules: - - alert: Watchdog - annotations: - description: | - This is an alert meant to ensure that the entire alerting pipeline is functional. - This alert is always firing, therefore it should always be firing in Alertmanager - and always fire against a receiver. There are integrations with various notification - mechanisms that send a notification when this alert is not firing. For example the - "DeadMansSnitch" integration in PagerDuty. - summary: An alert that should always be firing to certify that Alertmanager - is working properly. - expr: vector(1) - labels: - severity: none - - name: kube-state-metrics - rules: - - alert: KubeStateMetricsListErrors - annotations: - description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. - summary: kube-state-metrics is experiencing errors in list operations. - expr: | - (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) - / - sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m]))) - > 0.01 - for: 15m - labels: - severity: critical - - alert: KubeStateMetricsWatchErrors - annotations: - description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. - summary: kube-state-metrics is experiencing errors in watch operations. - expr: | - (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) - / - sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m]))) - > 0.01 - for: 15m - labels: - severity: critical - - name: kubernetes-apps - rules: - - alert: KubePodCrashLooping - annotations: - description: Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is restarting {{ printf "%.2f" $value }} times / 10 minutes. - summary: Pod is crash looping. - expr: | - rate(kube_pod_container_status_restarts_total{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[10m]) * 60 * 5 > 0 - for: 15m - labels: - severity: warning - - alert: KubePodNotReady - annotations: - description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 15 minutes. - summary: Pod has been in a non-ready state for more than 15 minutes. - expr: | - sum by (namespace, pod) ( - max by(namespace, pod) ( - kube_pod_status_phase{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", phase=~"Pending|Unknown"} - ) * on(namespace, pod) group_left(owner_kind) topk by(namespace, pod) ( - 1, max by(namespace, pod, owner_kind) (kube_pod_owner{owner_kind!="Job"}) - ) - ) > 0 - for: 15m - labels: - severity: warning - - alert: KubeDeploymentGenerationMismatch - annotations: - description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back. 
- summary: Deployment generation mismatch due to possible roll-back - expr: | - kube_deployment_status_observed_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_deployment_metadata_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - for: 15m - labels: - severity: warning - - alert: KubeStatefulSetReplicasMismatch - annotations: - description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes. - summary: Deployment has not matched the expected number of replicas. - expr: | - ( - kube_statefulset_status_replicas_ready{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_statefulset_status_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - ) and ( - changes(kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[10m]) - == - 0 - ) - for: 15m - labels: - severity: warning - - alert: KubeStatefulSetGenerationMismatch - annotations: - description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back. - summary: StatefulSet generation mismatch due to possible roll-back - expr: | - kube_statefulset_status_observed_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_statefulset_metadata_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - for: 15m - labels: - severity: warning - - alert: KubeStatefulSetUpdateNotRolledOut - annotations: - description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out. - summary: StatefulSet update has not been rolled out. - expr: | - ( - max without (revision) ( - kube_statefulset_status_current_revision{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - unless - kube_statefulset_status_update_revision{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - ) - * - ( - kube_statefulset_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - ) - ) and ( - changes(kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[5m]) - == - 0 - ) - for: 15m - labels: - severity: warning - - alert: KubeDaemonSetRolloutStuck - annotations: - description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 30 minutes. - summary: DaemonSet rollout is stuck. 
- expr: | - ( - ( - kube_daemonset_status_current_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - ) or ( - kube_daemonset_status_number_misscheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - 0 - ) or ( - kube_daemonset_updated_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - ) or ( - kube_daemonset_status_number_available{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - ) - ) and ( - changes(kube_daemonset_updated_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[5m]) - == - 0 - ) - for: 30m - labels: - severity: warning - - alert: KubeContainerWaiting - annotations: - description: Pod {{ $labels.namespace }}/{{ $labels.pod }} container {{ $labels.container}} has been in waiting state for longer than 1 hour. - summary: Pod container waiting longer than 1 hour - expr: | - sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) > 0 - for: 1h - labels: - severity: warning - - alert: KubeDaemonSetNotScheduled - annotations: - description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.' - summary: DaemonSet pods are not scheduled. - expr: | - kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - - - kube_daemonset_status_current_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 - for: 10m - labels: - severity: warning - - alert: KubeDaemonSetMisScheduled - annotations: - description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run.' - summary: DaemonSet pods are misscheduled. - expr: | - kube_daemonset_status_number_misscheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 - for: 15m - labels: - severity: warning - - alert: KubeJobCompletion - annotations: - description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than 12 hours to complete. - summary: Job did not complete in time - expr: | - kube_job_spec_completions{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - kube_job_status_succeeded{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 - for: 12h - labels: - severity: warning - - alert: KubeJobFailed - annotations: - description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. Removing failed job after investigation should clear this alert. - summary: Job failed to complete. - expr: | - kube_job_failed{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 - for: 15m - labels: - severity: warning - - alert: KubeHpaReplicasMismatch - annotations: - description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has not matched the desired number of replicas for longer than 15 minutes. - summary: HPA has not matched descired number of replicas. 
- expr: | - (kube_hpa_status_desired_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - != - kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) - and - (kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - > - kube_hpa_spec_min_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) - and - (kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - < - kube_hpa_spec_max_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) - and - changes(kube_hpa_status_current_replicas[15m]) == 0 - for: 15m - labels: - severity: warning - - alert: KubeHpaMaxedOut - annotations: - description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has been running at max replicas for longer than 15 minutes. - summary: HPA is running at max replicas - expr: | - kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - == - kube_hpa_spec_max_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - for: 15m - labels: - severity: warning - - name: kubernetes-resources - rules: - - alert: KubeCPUOvercommit - annotations: - description: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. - summary: Cluster has overcommitted CPU resource requests. - expr: | - sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) - / - sum(kube_node_status_allocatable{resource="cpu"}) - > - (count(kube_node_status_allocatable{resource="cpu"}) -1) / count(kube_node_status_allocatable{resource="cpu"}) - for: 5m - labels: - severity: warning - - alert: KubeMemoryOvercommit - annotations: - description: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. - summary: Cluster has overcommitted memory resource requests. - expr: | - sum(namespace_memory:kube_pod_container_resource_requests_bytes:sum{}) - / - sum(kube_node_status_allocatable{resource="memory"}) - > - (count(kube_node_status_allocatable{resource="memory"})-1) - / - count(kube_node_status_allocatable{resource="memory"}) - for: 5m - labels: - severity: warning - - alert: KubeCPUQuotaOvercommit - annotations: - description: Cluster has overcommitted CPU resource requests for Namespaces. - summary: Cluster has overcommitted CPU resource requests. - expr: "sum(kube_resourcequota{namespace=~\"(kube-.*|default|logging)\",job=\"kube-state-metrics\", type=\"hard\", resource=\"cpu\"})\n /\nsum(kube_node_status_allocatable{resource=\"cpu\"}) \n > 1.5\n" - for: 5m - labels: - severity: warning - - alert: KubeMemoryQuotaOvercommit - annotations: - description: Cluster has overcommitted memory resource requests for Namespaces. - summary: Cluster has overcommitted memory resource requests. - expr: | - sum(kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard", resource="memory"}) - / - sum(kube_node_status_allocatable{resource="memory",job="kube-state-metrics"}) - > 1.5 - for: 5m - labels: - severity: warning - - alert: KubeQuotaAlmostFull - annotations: - description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. - summary: Namespace quota is going to be full. 
- expr: | - kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} - / ignoring(instance, job, type) - (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) - > 0.9 < 1 - for: 15m - labels: - severity: info - - alert: KubeQuotaFullyUsed - annotations: - description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. - summary: Namespace quota is fully used. - expr: | - kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} - / ignoring(instance, job, type) - (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) - == 1 - for: 15m - labels: - severity: info - - alert: KubeQuotaExceeded - annotations: - description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. - summary: Namespace quota has exceeded the limits. - expr: | - kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} - / ignoring(instance, job, type) - (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) - > 1 - for: 15m - labels: - severity: warning - - name: kubernetes-storage - rules: - # - alert: KubePersistentVolumeFillingUp - # annotations: - # description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free. - # summary: PersistentVolume is filling up. - # expr: | - # kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} - # / - # kubelet_volume_stats_capacity_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} - # < 0.03 - # for: 1m - # labels: - # severity: critical - # - alert: KubePersistentVolumeFillingUp - # annotations: - # description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available. - # summary: PersistentVolume is filling up. - # expr: | - # ( - # kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} - # / - # kubelet_volume_stats_capacity_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} - # ) < 0.15 - # and - # predict_linear(kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 - # for: 1h - # labels: - # severity: warning - - alert: KubePersistentVolumeErrors - annotations: - description: The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase }}. - summary: PersistentVolume is having issues with provisioning. - expr: | - kube_persistentvolume_status_phase{phase=~"Failed|Pending",namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 - for: 5m - labels: - severity: critical - - name: kubernetes-system - rules: - - alert: KubeClientErrors - annotations: - description: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.' - summary: Kubernetes API server client is experiencing errors. 
- expr: | - (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) - / - sum(rate(rest_client_requests_total[5m])) by (instance, job)) - > 0.01 - for: 15m - labels: - severity: warning - - name: kube-apiserver-slos - rules: - - alert: KubeAPIErrorBudgetBurn - annotations: - description: The API server is burning too much error budget. - summary: The API server is burning too much error budget. - expr: | - sum(apiserver_request:burnrate1h) > (14.40 * 0.01000) - and - sum(apiserver_request:burnrate5m) > (14.40 * 0.01000) - for: 2m - labels: - long: 1h - severity: critical - short: 5m - - alert: KubeAPIErrorBudgetBurn - annotations: - description: The API server is burning too much error budget. - summary: The API server is burning too much error budget. - expr: | - sum(apiserver_request:burnrate6h) > (6.00 * 0.01000) - and - sum(apiserver_request:burnrate30m) > (6.00 * 0.01000) - for: 15m - labels: - long: 6h - severity: critical - short: 30m - - alert: KubeAPIErrorBudgetBurn - annotations: - description: The API server is burning too much error budget. - summary: The API server is burning too much error budget. - expr: | - sum(apiserver_request:burnrate1d) > (3.00 * 0.01000) - and - sum(apiserver_request:burnrate2h) > (3.00 * 0.01000) - for: 1h - labels: - long: 1d - severity: warning - short: 2h - - alert: KubeAPIErrorBudgetBurn - annotations: - description: The API server is burning too much error budget. - summary: The API server is burning too much error budget. - expr: | - sum(apiserver_request:burnrate3d) > (1.00 * 0.01000) - and - sum(apiserver_request:burnrate6h) > (1.00 * 0.01000) - for: 3h - labels: - long: 3d - severity: warning - short: 6h - - name: kubernetes-system-apiserver - rules: - - alert: AggregatedAPIErrors - annotations: - description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported errors. It has appeared unavailable {{ $value | humanize }} times averaged over the past 10m. - summary: An aggregated API has reported errors. - expr: | - sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[10m])) > 4 - labels: - severity: warning - - alert: AggregatedAPIDown - annotations: - description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been only {{ $value | humanize }}% available over the last 10m. - summary: An aggregated API is down. - expr: | - (1 - max by(name, namespace)(avg_over_time(aggregator_unavailable_apiservice[10m]))) * 100 < 85 - for: 15m - labels: - severity: warning - - alert: KubeAPIDown - annotations: - description: KubeAPI has disappeared from Prometheus target discovery. - summary: Target disappeared from Prometheus target discovery. - expr: | - absent(up{job="apiserver"} == 1) - for: 15m - labels: - severity: critical - - alert: KubeAPITerminatedRequests - annotations: - description: The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. - summary: The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. - expr: | - sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) / ( sum(rate(apiserver_request_total{job="apiserver"}[10m])) + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) ) > 0.20 - for: 5m - labels: - severity: warning - - name: kubernetes-system-kubelet - rules: - - alert: KubeNodeNotReady - annotations: - description: '{{ $labels.node }} has been unready for more than 15 minutes.' - summary: Node is not ready. 
- expr: | - kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 - for: 15m - labels: - severity: warning - - alert: KubeNodeUnreachable - annotations: - description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.' - summary: Node is unreachable. - expr: | - (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1 - for: 15m - labels: - severity: warning - - alert: KubeletTooManyPods - annotations: - description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity. - summary: Kubelet is running at capacity. - expr: | - count by(node) ( - (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} == 1) * on(instance,pod,namespace,cluster) group_left(node) topk by(instance,pod,namespace,cluster) (1, kube_pod_info{job="kube-state-metrics"}) - ) - / - max by(node) ( - kube_node_status_capacity{job="kube-state-metrics",resource="pods"} != 1 - ) > 0.95 - for: 15m - labels: - severity: warning - - alert: KubeNodeReadinessFlapping - annotations: - description: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes. - summary: Node readiness status is flapping. - expr: | - sum(changes(kube_node_status_condition{status="true",condition="Ready"}[15m])) by (node) > 2 - for: 15m - labels: - severity: warning - - alert: KubeletPlegDurationHigh - annotations: - description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}. - summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist. - expr: | - node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10 - for: 5m - labels: - severity: warning - # - alert: KubeletPodStartUpLatencyHigh - # annotations: - # description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}. - # summary: Kubelet Pod startup latency is too high. - # expr: | - # histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60 - # for: 15m - # labels: - # severity: warning - - alert: KubeletClientCertificateRenewalErrors - annotations: - description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes). - summary: Kubelet has failed to renew its client certificate. - expr: | - increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0 - for: 15m - labels: - severity: warning - - alert: KubeletServerCertificateRenewalErrors - annotations: - description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes). - summary: Kubelet has failed to renew its server certificate. - expr: | - increase(kubelet_server_expiration_renew_errors[5m]) > 0 - for: 15m - labels: - severity: warning - # - alert: KubeletDown - # annotations: - # description: Kubelet has disappeared from Prometheus target discovery. - # summary: Target disappeared from Prometheus target discovery. 
- # expr: | - # absent(up{job="kubelet", metrics_path="/metrics"} == 1) - # for: 15m - # labels: - # severity: critical - - name: node-exporter - rules: - - alert: NodeFilesystemSpaceFillingUp - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up. - summary: Filesystem is predicted to run out of space within the next 24 hours. - expr: | - ( - node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 40 - and - predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: warning - - alert: NodeFilesystemSpaceFillingUp - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast. - summary: Filesystem is predicted to run out of space within the next 4 hours. - expr: | - ( - node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 15 - and - predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: critical - - alert: NodeFilesystemAlmostOutOfSpace - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. - summary: Filesystem has less than 5% space left. - expr: | - ( - node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 5 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: warning - - alert: NodeFilesystemAlmostOutOfSpace - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. - summary: Filesystem has less than 3% space left. - expr: | - ( - node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 3 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: critical - - alert: NodeFilesystemFilesFillingUp - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up. - summary: Filesystem is predicted to run out of inodes within the next 24 hours. - expr: | - ( - node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 40 - and - predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: warning - - alert: NodeFilesystemFilesFillingUp - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast. - summary: Filesystem is predicted to run out of inodes within the next 4 hours. 
- expr: | - ( - node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 20 - and - predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: critical - - alert: NodeFilesystemAlmostOutOfFiles - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. - summary: Filesystem has less than 5% inodes left. - expr: | - ( - node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 5 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: warning - - alert: NodeFilesystemAlmostOutOfFiles - annotations: - description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. - summary: Filesystem has less than 3% inodes left. - expr: | - ( - node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 3 - and - node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 - ) - for: 1h - labels: - severity: critical - - alert: NodeNetworkReceiveErrs - annotations: - description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' - summary: Network interface is reporting many receive errors. - expr: | - rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01 - for: 1h - labels: - severity: warning - - alert: NodeNetworkTransmitErrs - annotations: - description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' - summary: Network interface is reporting many transmit errors. - expr: | - rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01 - for: 1h - labels: - severity: warning - - alert: NodeHighNumberConntrackEntriesUsed - annotations: - description: '{{ $value | humanizePercentage }} of conntrack entries are used.' - summary: Number of conntrack are getting close to the limit. - expr: | - (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 - labels: - severity: warning - - alert: NodeTextFileCollectorScrapeError - annotations: - description: Node Exporter text file collector failed to scrape. - summary: Node Exporter text file collector failed to scrape. - expr: | - node_textfile_scrape_error{job="node-exporter"} == 1 - labels: - severity: warning - - alert: NodeClockSkewDetected - annotations: - description: Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure NTP is configured correctly on this host. - summary: Clock skew detected. - expr: | - ( - node_timex_offset_seconds > 0.05 - and - deriv(node_timex_offset_seconds[5m]) >= 0 - ) - or - ( - node_timex_offset_seconds < -0.05 - and - deriv(node_timex_offset_seconds[5m]) <= 0 - ) - for: 10m - labels: - severity: warning - - alert: NodeClockNotSynchronising - annotations: - description: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host. - summary: Clock not synchronising. 
- expr: | - min_over_time(node_timex_sync_status[5m]) == 0 - and - node_timex_maxerror_seconds >= 16 - for: 10m - labels: - severity: warning - - alert: NodeRAIDDegraded - annotations: - description: RAID array '{{ $labels.device }}' on {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically. - summary: RAID Array is degraded - expr: | - node_md_disks_required - ignoring (state) (node_md_disks{state="active"}) > 0 - for: 15m - labels: - severity: critical - - alert: NodeRAIDDiskFailure - annotations: - description: At least one device in RAID array on {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap. - summary: Failed device in RAID array - expr: | - node_md_disks{state="failed"} > 0 - labels: - severity: warning - - name: prometheus ## prometheus - rules: - - alert: PrometheusBadConfig - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration. - summary: Failed Prometheus configuration reload. - expr: | - # Without max_over_time, failed scrapes could create false negatives, see - # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. - max_over_time(prometheus_config_last_reload_successful{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) == 0 - for: 10m - labels: - severity: critical - - alert: PrometheusNotificationQueueRunningFull - annotations: - description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full. - summary: Prometheus alert notification queue predicted to run full in less than 30m. - expr: | - # Without min_over_time, failed scrapes could create false negatives, see - # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. - ( - predict_linear(prometheus_notifications_queue_length{job=~"prometheus-k8s|prometheus-user-workload"}[5m], 60 * 30) - > - min_over_time(prometheus_notifications_queue_capacity{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - ) - for: 15m - labels: - severity: warning - - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers - annotations: - description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.' - summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager. - expr: | - ( - rate(prometheus_notifications_errors_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - / - rate(prometheus_notifications_sent_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - ) - * 100 - > 1 - for: 15m - labels: - severity: warning - - alert: PrometheusNotConnectedToAlertmanagers - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers. - summary: Prometheus is not connected to any Alertmanagers. - expr: | - # Without max_over_time, failed scrapes could create false negatives, see - # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. - max_over_time(prometheus_notifications_alertmanagers_discovered{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) < 1 - for: 10m - labels: - severity: warning - - alert: PrometheusTSDBReloadsFailing - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h. 
- summary: Prometheus has issues reloading blocks from disk. - expr: | - increase(prometheus_tsdb_reloads_failures_total{job=~"prometheus-k8s|prometheus-user-workload"}[3h]) > 0 - for: 4h - labels: - severity: warning - - alert: PrometheusTSDBCompactionsFailing - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h. - summary: Prometheus has issues compacting blocks. - expr: | - increase(prometheus_tsdb_compactions_failed_total{job=~"prometheus-k8s|prometheus-user-workload"}[3h]) > 0 - for: 4h - labels: - severity: warning - - alert: PrometheusNotIngestingSamples - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples. - summary: Prometheus is not ingesting samples. - expr: | - ( - rate(prometheus_tsdb_head_samples_appended_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) <= 0 - and - ( - sum without(scrape_job) (prometheus_target_metadata_cache_entries{job=~"prometheus-k8s|prometheus-user-workload"}) > 0 - or - sum without(rule_group) (prometheus_rule_group_rules{job=~"prometheus-k8s|prometheus-user-workload"}) > 0 - ) - ) - for: 10m - labels: - severity: warning - - alert: PrometheusDuplicateTimestamps - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp. - summary: Prometheus is dropping samples with duplicate timestamps. - expr: | - rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 - for: 1h - labels: - severity: warning - - alert: PrometheusOutOfOrderTimestamps - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order. - summary: Prometheus drops samples with out-of-order timestamps. - expr: | - rate(prometheus_target_scrapes_sample_out_of_order_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 - for: 1h - labels: - severity: warning - - alert: PrometheusRemoteStorageFailures - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }} - summary: Prometheus fails to send samples to remote storage. - expr: | - ( - rate(prometheus_remote_storage_failed_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - / - ( - rate(prometheus_remote_storage_failed_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - + - rate(prometheus_remote_storage_succeeded_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - ) - ) - * 100 - > 1 - for: 15m - labels: - severity: critical - - alert: PrometheusRemoteWriteBehind - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}. - summary: Prometheus remote write is behind. - expr: | - # Without max_over_time, failed scrapes could create false negatives, see - # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
- ( - max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - - ignoring(remote_name, url) group_right - max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - ) - > 120 - for: 15m - labels: - severity: critical - - alert: PrometheusRemoteWriteDesiredShards - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job=~"prometheus-k8s|prometheus-user-workload"}` $labels.instance | query | first | value }}. - summary: Prometheus remote write desired shards calculation wants to run more than configured max shards. - expr: | - # Without max_over_time, failed scrapes could create false negatives, see - # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. - ( - max_over_time(prometheus_remote_storage_shards_desired{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - > - max_over_time(prometheus_remote_storage_shards_max{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) - ) - for: 15m - labels: - severity: warning - - alert: PrometheusRuleFailures - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m. - summary: Prometheus is failing rule evaluations. - expr: | - increase(prometheus_rule_evaluation_failures_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 - for: 15m - labels: - severity: critical - - alert: PrometheusMissingRuleEvaluations - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m. - summary: Prometheus is missing rule evaluations due to slow rule group evaluation. - expr: | - increase(prometheus_rule_group_iterations_missed_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 - for: 15m - labels: - severity: warning - - alert: PrometheusTargetLimitHit - annotations: - description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because the number of targets exceeded the configured target_limit. - summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit. - expr: | - increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 - for: 15m - labels: - severity: warning - - alert: PrometheusErrorSendingAlertsToAnyAlertmanager - annotations: - description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.' - summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager. 
- expr: | - min without (alertmanager) ( - rate(prometheus_notifications_errors_total{job=~"prometheus-k8s|prometheus-user-workload",alertmanager!~``}[5m]) - / - rate(prometheus_notifications_sent_total{job=~"prometheus-k8s|prometheus-user-workload",alertmanager!~``}[5m]) - ) - * 100 - > 3 - for: 15m - labels: - severity: critical diff --git a/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml b/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml deleted file mode 100644 index a630fe5c6..000000000 --- a/operators/endpointmetrics/manifests/prometheus/kubernetes-monitoring-rules.yaml +++ /dev/null @@ -1,687 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: kubernetes-monitoring-rules - namespace: open-cluster-management-addon-observability -data: - kubernetes-monitoring-rules.yaml: | - groups: - - name: kube-apiserver-burnrate.rules - rules: - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1d])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1d])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[1d])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d])) - labels: - verb: read - record: apiserver_request:burnrate1d - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1h])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1h])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[1h])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h])) - labels: - verb: read - record: apiserver_request:burnrate1h - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[2h])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[2h])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[2h])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h])) - labels: - verb: read - record: apiserver_request:burnrate2h - - expr: | - ( - ( - # too slow - sum by (cluster) 
(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[30m])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[30m])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[30m])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m])) - labels: - verb: read - record: apiserver_request:burnrate30m - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[3d])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[3d])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[3d])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d])) - labels: - verb: read - record: apiserver_request:burnrate3d - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[5m])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[5m])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[5m])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) - labels: - verb: read - record: apiserver_request:burnrate5m - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h])) - - - ( - ( - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[6h])) - or - vector(0) - ) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[6h])) - + - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[6h])) - ) - ) - + - # errors - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h])) - labels: - verb: read - record: apiserver_request:burnrate6h - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) - - - sum 
by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1d])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) - labels: - verb: write - record: apiserver_request:burnrate1d - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) - - - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1h])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) - labels: - verb: write - record: apiserver_request:burnrate1h - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) - - - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[2h])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) - labels: - verb: write - record: apiserver_request:burnrate2h - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) - - - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[30m])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) - labels: - verb: write - record: apiserver_request:burnrate30m - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) - - - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[3d])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) - labels: - verb: write - record: apiserver_request:burnrate3d - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) - - - sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[5m])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) - labels: - verb: write - record: apiserver_request:burnrate5m - - expr: | - ( - ( - # too slow - sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) - - - sum by (cluster) 
(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[6h])) - ) - + - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h])) - ) - / - sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) - labels: - verb: write - record: apiserver_request:burnrate6h - - name: kube-apiserver-histogram.rules - rules: - - expr: | - histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0 - labels: - quantile: "0.99" - verb: read - record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0 - labels: - quantile: "0.99" - verb: write - record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) - labels: - quantile: "0.99" - record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) - labels: - quantile: "0.9" - record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) - labels: - quantile: "0.5" - record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - - interval: 3m - name: kube-apiserver-availability.rules - rules: - - expr: | - avg_over_time(code_verb:apiserver_request_total:increase1h[30d]) * 24 * 30 - record: code_verb:apiserver_request_total:increase30d - - expr: | - sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"}) - labels: - verb: read - record: code:apiserver_request_total:increase30d - - expr: | - sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) - labels: - verb: write - record: code:apiserver_request_total:increase30d - - expr: | - 1 - ( - ( - # write too slow - sum by (cluster) (increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) - - - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) - ) + - ( - # read too slow - sum by (cluster) (increase(apiserver_request_duration_seconds_count{verb=~"LIST|GET"}[30d])) - - - ( - ( - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope=~"resource|",le="1"}[30d])) - or - vector(0) - ) - + - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="namespace",le="5"}[30d])) - + - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="cluster",le="40"}[30d])) - ) - ) + - # errors - sum by (cluster) (code:apiserver_request_total:increase30d{code=~"5.."} or vector(0)) - ) - / - sum by (cluster) 
(code:apiserver_request_total:increase30d) - labels: - verb: all - record: apiserver_request:availability30d - - expr: | - 1 - ( - sum by (cluster) (increase(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30d])) - - - ( - # too slow - ( - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[30d])) - or - vector(0) - ) - + - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[30d])) - + - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[30d])) - ) - + - # errors - sum by (cluster) (code:apiserver_request_total:increase30d{verb="read",code=~"5.."} or vector(0)) - ) - / - sum by (cluster) (code:apiserver_request_total:increase30d{verb="read"}) - labels: - verb: read - record: apiserver_request:availability30d - - expr: | - 1 - ( - ( - # too slow - sum by (cluster) (increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) - - - sum by (cluster) (increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) - ) - + - # errors - sum by (cluster) (code:apiserver_request_total:increase30d{verb="write",code=~"5.."} or vector(0)) - ) - / - sum by (cluster) (code:apiserver_request_total:increase30d{verb="write"}) - labels: - verb: write - record: apiserver_request:availability30d - - expr: | - sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) - labels: - verb: read - record: code_resource:apiserver_request_total:rate5m - - expr: | - sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) - labels: - verb: write - record: code_resource:apiserver_request_total:rate5m - - expr: | - sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"2.."}[1h])) - record: code_verb:apiserver_request_total:increase1h - - expr: | - sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"3.."}[1h])) - record: code_verb:apiserver_request_total:increase1h - - expr: | - sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"4.."}[1h])) - record: code_verb:apiserver_request_total:increase1h - - expr: | - sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) - record: code_verb:apiserver_request_total:increase1h - - name: k8s.rules - rules: - - expr: | - sum by (cluster, namespace, pod, container) ( - rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) - ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( - 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) - ) - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate - - expr: | - sum by (cluster, namespace, pod, container) ( - irate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) - ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( - 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) - ) - record: 
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate - - expr: | - container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} - * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, - max by(namespace, pod, node) (kube_pod_info{node!=""}) - ) - record: node_namespace_pod_container:container_memory_working_set_bytes - - expr: | - container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} - * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, - max by(namespace, pod, node) (kube_pod_info{node!=""}) - ) - record: node_namespace_pod_container:container_memory_rss - - expr: | - container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} - * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, - max by(namespace, pod, node) (kube_pod_info{node!=""}) - ) - record: node_namespace_pod_container:container_memory_cache - - expr: | - container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} - * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, - max by(namespace, pod, node) (kube_pod_info{node!=""}) - ) - record: node_namespace_pod_container:container_memory_swap - - expr: | - kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) - group_left() max by (namespace, pod) ( - (kube_pod_status_phase{phase=~"Pending|Running"} == 1) - ) - record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests - - expr: | - sum by (namespace, cluster) ( - sum by (namespace, pod, cluster) ( - max by (namespace, pod, container, cluster) ( - kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} - ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( - kube_pod_status_phase{phase=~"Pending|Running"} == 1 - ) - ) - ) - record: namespace_memory:kube_pod_container_resource_requests:sum - - expr: | - kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) - group_left() max by (namespace, pod) ( - (kube_pod_status_phase{phase=~"Pending|Running"} == 1) - ) - record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests - - expr: | - sum by (namespace, cluster) ( - sum by (namespace, pod, cluster) ( - max by (namespace, pod, container, cluster) ( - kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} - ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( - kube_pod_status_phase{phase=~"Pending|Running"} == 1 - ) - ) - ) - record: namespace_cpu:kube_pod_container_resource_requests:sum - - expr: | - kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) - group_left() max by (namespace, pod) ( - (kube_pod_status_phase{phase=~"Pending|Running"} == 1) - ) - record: cluster:namespace:pod_memory:active:kube_pod_container_resource_limits - - expr: | - sum by (namespace, cluster) ( - sum by (namespace, pod, cluster) ( - max by (namespace, pod, container, cluster) ( - kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} - ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( - kube_pod_status_phase{phase=~"Pending|Running"} == 1 - ) - ) - ) - record: namespace_memory:kube_pod_container_resource_limits:sum - - expr: | - kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) - 
group_left() max by (namespace, pod) ( - (kube_pod_status_phase{phase=~"Pending|Running"} == 1) - ) - record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits - - expr: | - sum by (namespace, cluster) ( - sum by (namespace, pod, cluster) ( - max by (namespace, pod, container, cluster) ( - kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} - ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( - kube_pod_status_phase{phase=~"Pending|Running"} == 1 - ) - ) - ) - record: namespace_cpu:kube_pod_container_resource_limits:sum - - expr: | - max by (cluster, namespace, workload, pod) ( - label_replace( - label_replace( - kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, - "replicaset", "$1", "owner_name", "(.*)" - ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) ( - 1, max by (replicaset, namespace, owner_name) ( - kube_replicaset_owner{job="kube-state-metrics"} - ) - ), - "workload", "$1", "owner_name", "(.*)" - ) - ) - labels: - workload_type: deployment - record: namespace_workload_pod:kube_pod_owner:relabel - - expr: | - max by (cluster, namespace, workload, pod) ( - label_replace( - kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, - "workload", "$1", "owner_name", "(.*)" - ) - ) - labels: - workload_type: daemonset - record: namespace_workload_pod:kube_pod_owner:relabel - - expr: | - max by (cluster, namespace, workload, pod) ( - label_replace( - kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, - "workload", "$1", "owner_name", "(.*)" - ) - ) - labels: - workload_type: statefulset - record: namespace_workload_pod:kube_pod_owner:relabel - - name: kube-scheduler.rules - rules: - - expr: | - histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.99" - record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.99" - record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.99" - record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.9" - record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.9" - record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.9" - record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.5" - record: 
cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.5" - record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) - labels: - quantile: "0.5" - record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile - - name: node.rules - rules: - - expr: | - topk by(namespace, pod) (1, - max by (node, namespace, pod) ( - label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)") - )) - record: 'node_namespace_pod:kube_pod_info:' - - expr: | - count by (cluster, node) (sum by (node, cpu) ( - node_cpu_seconds_total{job="node-exporter"} - * on (namespace, pod) group_left(node) - topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:) - )) - record: node:node_num_cpu:sum - - expr: | - sum( - node_memory_MemAvailable_bytes{job="node-exporter"} or - ( - node_memory_Buffers_bytes{job="node-exporter"} + - node_memory_Cached_bytes{job="node-exporter"} + - node_memory_MemFree_bytes{job="node-exporter"} + - node_memory_Slab_bytes{job="node-exporter"} - ) - ) by (cluster) - record: :node_memory_MemAvailable_bytes:sum - - name: kubelet.rules - rules: - - expr: | - histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) - labels: - quantile: "0.99" - record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) - labels: - quantile: "0.9" - record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - - expr: | - histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) - labels: - quantile: "0.5" - record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/kustomization.yaml b/operators/endpointmetrics/manifests/prometheus/kustomization.yaml index 605f8bb6d..d42a233e3 100644 --- a/operators/endpointmetrics/manifests/prometheus/kustomization.yaml +++ b/operators/endpointmetrics/manifests/prometheus/kustomization.yaml @@ -1,27 +1,29 @@ resources: -- kube-prometheus-rules.yaml - kube-state-metrics-clusterRole.yaml - kube-state-metrics-clusterRoleBinding.yaml - kube-state-metrics-deployment.yaml - kube-state-metrics-service.yaml - kube-state-metrics-serviceAccount.yaml -- kubernetes-monitoring-rules.yaml -- kubernetes-monitoring-alertingrules.yaml - node-exporter-clusterRole.yaml - node-exporter-clusterRoleBinding.yaml - node-exporter-daemonset.yaml -- node-exporter-rules.yaml - node-exporter-service.yaml - node-exporter-serviceAccount.yaml +- prometheus-alertmanager-config-secret.yaml - prometheus-clusterRole.yaml - prometheus-clusterRoleBinding.yaml -- prometheus-config.yaml +- prometheus-operator-deployment.yaml +- prometheus-operator-role.yaml +- 
prometheus-operator-roleBinding.yaml +- prometheus-operator-serviceAccount.yaml +- prometheus-resource.yaml - prometheus-role.yaml - prometheus-role-default.yaml - prometheus-role-kube-system.yaml - prometheus-roleBinding.yaml - prometheus-roleBinding-default.yaml - prometheus-roleBinding-kube-system.yaml +- prometheus-scrape-targets-secret.yaml - prometheus-service.yaml - prometheus-serviceAccount.yaml -- prometheus-statefulset.yaml + diff --git a/operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml b/operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml deleted file mode 100644 index 77352962e..000000000 --- a/operators/endpointmetrics/manifests/prometheus/node-exporter-rules.yaml +++ /dev/null @@ -1,65 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: node-exporter-rules - namespace: open-cluster-management-addon-observability -data: - node-exporter-rules.yaml: | - groups: - - name: node-exporter.rules - rules: - - expr: | - count without (cpu) ( - count without (mode) ( - node_cpu_seconds_total{job="node-exporter"} - ) - ) - record: instance:node_num_cpu:sum - - expr: | - 1 - avg without (cpu, mode) ( - rate(node_cpu_seconds_total{job="node-exporter", mode="idle"}[1m]) - ) - record: instance:node_cpu_utilisation:rate1m - - expr: | - ( - node_load1{job="node-exporter"} - / - instance:node_num_cpu:sum{job="node-exporter"} - ) - record: instance:node_load1_per_cpu:ratio - - expr: | - 1 - ( - node_memory_MemAvailable_bytes{job="node-exporter"} - / - node_memory_MemTotal_bytes{job="node-exporter"} - ) - record: instance:node_memory_utilisation:ratio - - expr: | - rate(node_vmstat_pgmajfault{job="node-exporter"}[1m]) - record: instance:node_vmstat_pgmajfault:rate1m - - expr: | - rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) - record: instance_device:node_disk_io_time_seconds:rate1m - - expr: | - rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) - record: instance_device:node_disk_io_time_weighted_seconds:rate1m - - expr: | - sum without (device) ( - rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[1m]) - ) - record: instance:node_network_receive_bytes_excluding_lo:rate1m - - expr: | - sum without (device) ( - rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[1m]) - ) - record: instance:node_network_transmit_bytes_excluding_lo:rate1m - - expr: | - sum without (device) ( - rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[1m]) - ) - record: instance:node_network_receive_drop_excluding_lo:rate1m - - expr: | - sum without (device) ( - rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[1m]) - ) - record: instance:node_network_transmit_drop_excluding_lo:rate1m \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-alertmanager-config-secret.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-alertmanager-config-secret.yaml new file mode 100644 index 000000000..bab7c3e31 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-alertmanager-config-secret.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Secret +metadata: + name: prometheus-alertmanager + namespace: open-cluster-management-addon-observability +type: Opaque +stringData: + alertmanager.yaml: |- + - authorization: + type: Bearer + credentials_file: 
/etc/prometheus/secrets/observability-alertmanager-accessor/token + tls_config: + ca_file: /etc/prometheus/secrets/hub-alertmanager-router-ca/service-ca.crt + server_name: "" + insecure_skip_verify: false + follow_redirects: true + scheme: https + path_prefix: / + timeout: 10s + api_version: v2 + static_configs: + - targets: + - _ALERTMANAGER_ENDPOINT_ \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml new file mode 100644 index 000000000..bb75550a8 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: prometheus-operator + app.kubernetes.io/name: prometheus-operator + name: prometheus-operator + namespace: open-cluster-management-addon-observability +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: prometheus-operator + app.kubernetes.io/name: prometheus-operator + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: prometheus-operator + labels: + app.kubernetes.io/component: prometheus-operator + app.kubernetes.io/name: prometheus-operator + spec: + containers: + - args: + - '--kubelet-service=kube-system/kubelet' + - '--prometheus-config-reloader={{PROM_CONFIGMAP_RELOADER_IMG}}' + - '--namespaces={{NAMESPACE}}' + image: quay.io/prometheus-operator/prometheus-operator:v0.53.1 + imagePullPolicy: IfNotPresent + name: prometheus-operator + ports: + - name: http + containerPort: 8080 + protocol: TCP + resources: + requests: + cpu: 5m + memory: 150Mi + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccount: prometheus-operator + serviceAccountName: prometheus-operator + terminationGracePeriodSeconds: 600 diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-operator-role.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-role.yaml new file mode 100644 index 000000000..81d2e031d --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-role.yaml @@ -0,0 +1,90 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: acm-prometheus-operator-role + labels: + app.kubernetes.io/component: prometheus-operator + app.kubernetes.io/name: prometheus-operator +rules: + - verbs: + - '*' + apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - alertmanagers/finalizers + - alertmanagerconfigs + - prometheuses + - prometheuses/finalizers + - thanosrulers + - thanosrulers/finalizers + - servicemonitors + - podmonitors + - probes + - prometheusrules + - verbs: + - '*' + apiGroups: + - apps + resources: + - statefulsets + - verbs: + - '*' + apiGroups: + - '' + resources: + - configmaps + - secrets + - verbs: + - list + - delete + apiGroups: + - '' + resources: + - pods + - verbs: + - get + - create + - update + - delete + apiGroups: + - '' + resources: + - services + - services/finalizers + - endpoints + - verbs: + - list + - watch + apiGroups: + - '' + resources: + - nodes + - verbs: + - get + - list + - watch + apiGroups: + - '' + resources: + - namespaces + - verbs: + - get + - list + - watch + apiGroups: + - networking.k8s.io + resources: + - ingresses + - verbs: + - create + apiGroups: + - authentication.k8s.io + resources: 
+ - tokenreviews + - verbs: + - create + apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-operator-roleBinding.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-roleBinding.yaml new file mode 100644 index 000000000..28d3c3f75 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-roleBinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: acm-prometheus-operator-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: acm-prometheus-operator-role +subjects: +- kind: ServiceAccount + name: prometheus-operator + namespace: open-cluster-management-addon-observability + diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-operator-serviceAccount.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-serviceAccount.yaml new file mode 100644 index 000000000..b4548459a --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-operator-serviceAccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus-operator + namespace: open-cluster-management-addon-observability diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-resource.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-resource.yaml new file mode 100644 index 000000000..f05fed4af --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-resource.yaml @@ -0,0 +1,73 @@ +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + prometheus: k8s + name: k8s + namespace: open-cluster-management-addon-observability +spec: + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: prometheus-k8s + secrets: + - hub-alertmanager-router-ca + - observability-alertmanager-accessor + serviceMonitorSelector: {} + resources: + requests: + memory: 400Mi + externalLabels: + cluster: CLUSTER + listenLocal: true + containers: + - args: + - --logtostderr + - --secure-listen-address=[$(IP)]:9091 + - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + - --upstream=http://127.0.0.1:9090/ + env: + - name: IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: quay.io/stolostron/kube-rbac-proxy:2.5.0-SNAPSHOT-2022-01-25-02-13-09 + name: kube-rbac-proxy + ports: + - containerPort: 9091 + name: https + resources: + limits: + cpu: 20m + memory: 40Mi + requests: + cpu: 10m + memory: 20Mi + - name: config-reloader + resources: + requests: + cpu: 1m + memory: 10Mi + podMetadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + labels: + app.kubernetes.io/component: prometheus + app.kubernetes.io/name: prometheus + podMonitorSelector: {} + serviceMonitorNamespaceSelector: {} + rules: + alert: {} + arbitraryFSAccessThroughSMs: {} + image: quay.io/stolostron/prometheus:2.5.0-SNAPSHOT-2022-01-25-02-13-09 + replicas: 1 + ruleSelector: {} + ruleNamespaceSelector: {} + retention: 24h + additionalScrapeConfigs: + name: prometheus-scrape-targets + key: scrape-targets.yaml + additionalAlertManagerConfigs: + name: prometheus-alertmanager + 
key: alertmanager.yaml \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-scrape-targets-secret.yaml similarity index 97% rename from operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml rename to operators/endpointmetrics/manifests/prometheus/prometheus-scrape-targets-secret.yaml index 522698870..7d56d88c5 100644 --- a/operators/endpointmetrics/manifests/prometheus/prometheus-config.yaml +++ b/operators/endpointmetrics/manifests/prometheus/prometheus-scrape-targets-secret.yaml @@ -1,40 +1,11 @@ -kind: ConfigMap apiVersion: v1 +kind: Secret metadata: - name: prometheus-k8s-config + name: prometheus-scrape-targets namespace: open-cluster-management-addon-observability -data: - prometheus.yaml: | - global: - evaluation_interval: 30s - scrape_interval: 5m - external_labels: - cluster: _CLUSTERID_ - alerting: - alert_relabel_configs: - - separator: ; - regex: prometheus_replica - replacement: $1 - action: labeldrop - alertmanagers: - - authorization: - type: Bearer - credentials_file: /etc/prometheus/secrets/observability-alertmanager-accessor/token - tls_config: - ca_file: /etc/prometheus/secrets/hub-alertmanager-router-ca/service-ca.crt - server_name: "" - insecure_skip_verify: false - follow_redirects: true - scheme: https - path_prefix: / - timeout: 10s - api_version: v2 - static_configs: - - targets: - - _ALERTMANAGER_ENDPOINT_ - rule_files: - - /etc/prometheus/rules/prometheus-k8s-rulefiles-0/*.yaml - scrape_configs: +type: Opaque +stringData: + scrape-targets.yaml: |- - job_name: serviceMonitor/open-cluster-management-addon-observability/coredns/0 honor_labels: false kubernetes_sd_configs: diff --git a/operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml b/operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml deleted file mode 100644 index b8a2501f7..000000000 --- a/operators/endpointmetrics/manifests/prometheus/prometheus-statefulset.yaml +++ /dev/null @@ -1,165 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - app.kubernetes.io/component: prometheus - app.kubernetes.io/name: prometheus - name: prometheus-k8s - namespace: open-cluster-management-addon-observability -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: prometheus - app.kubernetes.io/name: prometheus - serviceName: prometheus-operated - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: prometheus - labels: - app.kubernetes.io/component: prometheus - app.kubernetes.io/name: prometheus - spec: - containers: - - args: - - --config.file=/etc/prometheus/config_out/prometheus.yaml - - --storage.tsdb.path=/prometheus - - --storage.tsdb.retention.time=24h - - --web.enable-lifecycle - - --web.route-prefix=/ - image: quay.io/stolostron/prometheus:2.4.0-SNAPSHOT-2021-08-11-14-15-20 - imagePullPolicy: IfNotPresent - name: prometheus - ports: - - containerPort: 9090 - name: web - protocol: TCP - readinessProbe: - failureThreshold: 120 - httpGet: - path: /-/ready - port: web - scheme: HTTP - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 3 - resources: - requests: - memory: 400Mi - volumeMounts: - - mountPath: /prometheus - name: prometheus-k8s-db - - mountPath: /etc/prometheus/config_out - name: config-out - readOnly: true - - name: node-exporter-rules - mountPath: etc/prometheus/rules/prometheus-k8s-rulefiles-0/node-exporter-rules.yaml - subPath: node-exporter-rules.yaml 
- - name: kubernetes-monitoring-rules - mountPath: etc/prometheus/rules/prometheus-k8s-rulefiles-0/kubernetes-monitoring-rules.yaml - subPath: kubernetes-monitoring-rules.yaml - - name: kube-prometheus-rules - mountPath: etc/prometheus/rules/prometheus-k8s-rulefiles-0/kube-prometheus-rules.yaml - subPath: kube-prometheus-rules.yaml - - name: kubernetes-monitoring-alertingrules - mountPath: etc/prometheus/rules/prometheus-k8s-rulefiles-0/kubernetes-monitoring-alertingrules.yaml - subPath: kubernetes-monitoring-alertingrules.yaml - - name: observability-alertmanager-accessor - mountPath: /etc/prometheus/secrets/observability-alertmanager-accessor - readOnly: true - - name: hub-alertmanager-router-ca - mountPath: /etc/prometheus/secrets/hub-alertmanager-router-ca - readOnly: true - - args: - - --logtostderr - - --secure-listen-address=[$(IP)]:9091 - - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - --upstream=http://127.0.0.1:9090/ - env: - - name: IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: quay.io/stolostron/kube-rbac-proxy:2.4.0-SNAPSHOT-2021-08-11-14-15-20 - name: kube-rbac-proxy - ports: - - containerPort: 9091 - hostPort: 9091 - name: https - resources: - limits: - cpu: 20m - memory: 40Mi - requests: - cpu: 10m - memory: 20Mi - securityContext: - runAsGroup: 65532 - runAsNonRoot: true - runAsUser: 65532 - - args: - - -webhook-url=http://localhost:9090/-/reload - - -volume-dir=/etc/prometheus/secrets/hub-alertmanager-router-ca - - -volume-dir=/etc/prometheus/secrets/observability-alertmanager-accessor - - -volume-dir=/etc/prometheus/config_out - image: quay.io/openshift/origin-configmap-reloader:4.5.0 - imagePullPolicy: IfNotPresent - name: config-reloader - resources: - requests: - cpu: 4m - memory: 25Mi - volumeMounts: - - mountPath: /etc/prometheus/config_out - name: config-out - readOnly: true - - name: observability-alertmanager-accessor - mountPath: /etc/prometheus/secrets/observability-alertmanager-accessor - readOnly: true - - name: hub-alertmanager-router-ca - mountPath: /etc/prometheus/secrets/hub-alertmanager-router-ca - readOnly: true - dnsPolicy: ClusterFirst - nodeSelector: - kubernetes.io/os: linux - restartPolicy: Always - schedulerName: default-scheduler - securityContext: - fsGroup: 2000 - runAsNonRoot: true - runAsUser: 1000 - serviceAccount: prometheus-k8s - serviceAccountName: prometheus-k8s - terminationGracePeriodSeconds: 600 - volumes: - - emptyDir: {} - name: prometheus-k8s-db - - configMap: - defaultMode: 420 - name: prometheus-k8s-config - name: config-out - - configMap: - defaultMode: 420 - name: node-exporter-rules - name: node-exporter-rules - - configMap: - defaultMode: 420 - name: kubernetes-monitoring-rules - name: kubernetes-monitoring-rules - - configMap: - defaultMode: 420 - name: kube-prometheus-rules - name: kube-prometheus-rules - - configMap: - defaultMode: 420 - name: kubernetes-monitoring-alertingrules - name: kubernetes-monitoring-alertingrules - - secret: - defaultMode: 420 - secretName: observability-alertmanager-accessor - name: observability-alertmanager-accessor - - secret: - defaultMode: 420 - secretName: hub-alertmanager-router-ca - name: hub-alertmanager-router-ca diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/k8s.yaml 
b/operators/endpointmetrics/manifests/prometheus/prometheusrules/k8s.yaml new file mode 100644 index 000000000..426c85da0 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/k8s.yaml @@ -0,0 +1,147 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/k8s.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: k8s-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: k8s.rules + rules: + - expr: |- + sum by (cluster, namespace, pod, container) ( + irate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!=""}[5m]) + ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( + 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate + - expr: |- + container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_working_set_bytes + - expr: |- + container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_rss + - expr: |- + container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_cache + - expr: |- + container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_swap + - expr: |- + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_memory:active:kube_pod_container_resource_requests + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_memory:kube_pod_container_resource_requests:sum + - expr: |- + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + 
kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_cpu:kube_pod_container_resource_requests:sum + - expr: |- + kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_memory:active:kube_pod_container_resource_limits + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_limits{resource="memory",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_memory:kube_pod_container_resource_limits:sum + - expr: |- + kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} * on (namespace, pod, cluster) + group_left() max by (namespace, pod, cluster) ( + (kube_pod_status_phase{phase=~"Pending|Running"} == 1) + ) + record: cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits + - expr: |- + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_limits{resource="cpu",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod, cluster) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace_cpu:kube_pod_container_resource_limits:sum + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, + "replicaset", "$1", "owner_name", "(.*)" + ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) ( + 1, max by (replicaset, namespace, owner_name) ( + kube_replicaset_owner{job="kube-state-metrics"} + ) + ), + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: deployment + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: daemonset + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: statefulset + record: namespace_workload_pod:kube_pod_owner:relabel \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-availability.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-availability.yaml new file mode 100644 index 000000000..3d400d6b3 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-availability.yaml @@ -0,0 +1,119 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-apiserver-availability-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - interval: 3m + name: 
kube-apiserver-availability.rules + rules: + - expr: avg_over_time(code_verb:apiserver_request_total:increase1h[30d]) * 24 * 30 + record: code_verb:apiserver_request_total:increase30d + - expr: sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"}) + labels: + verb: read + record: code:apiserver_request_total:increase30d + - expr: sum by (cluster, code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + labels: + verb: write + record: code:apiserver_request_total:increase30d + - expr: sum by (cluster, verb, scope) (increase(apiserver_request_duration_seconds_count[1h])) + record: cluster_verb_scope:apiserver_request_duration_seconds_count:increase1h + - expr: sum by (cluster, verb, scope) (avg_over_time(cluster_verb_scope:apiserver_request_duration_seconds_count:increase1h[30d]) * 24 * 30) + record: cluster_verb_scope:apiserver_request_duration_seconds_count:increase30d + - expr: sum by (cluster, verb, scope, le) (increase(apiserver_request_duration_seconds_bucket[1h])) + record: cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase1h + - expr: sum by (cluster, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase1h[30d]) * 24 * 30) + record: cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d + - expr: |- + 1 - ( + ( + # write too slow + sum by (cluster) (cluster_verb_scope:apiserver_request_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + - + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"}) + ) + + ( + # read too slow + sum by (cluster) (cluster_verb_scope:apiserver_request_duration_seconds_count:increase30d{verb=~"LIST|GET"}) + - + ( + ( + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"}) + or + vector(0) + ) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"}) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"}) + ) + ) + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d) + labels: + verb: all + record: apiserver_request:availability30d + - expr: |- + 1 - ( + sum by (cluster) (cluster_verb_scope:apiserver_request_duration_seconds_count:increase30d{verb=~"LIST|GET"}) + - + ( + # too slow + ( + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope=~"resource|",le="1"}) + or + vector(0) + ) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="namespace",le="5"}) + + + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"LIST|GET",scope="cluster",le="30"}) + ) + + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{verb="read",code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d{verb="read"}) + labels: + verb: read + record: apiserver_request:availability30d + - expr: |- + 1 - ( + ( + # too slow + sum by (cluster) 
(cluster_verb_scope:apiserver_request_duration_seconds_count:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + - + sum by (cluster) (cluster_verb_scope_le:apiserver_request_duration_seconds_bucket:increase30d{verb=~"POST|PUT|PATCH|DELETE",le="1"}) + ) + + + # errors + sum by (cluster) (code:apiserver_request_total:increase30d{verb="write",code=~"5.."} or vector(0)) + ) + / + sum by (cluster) (code:apiserver_request_total:increase30d{verb="write"}) + labels: + verb: write + record: apiserver_request:availability30d + - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: code_resource:apiserver_request_total:rate5m + - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: code_resource:apiserver_request_total:rate5m + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (cluster, code, verb) (increase(apiserver_request_total{job="apiserver",verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-histogram.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-histogram.yaml new file mode 100644 index 000000000..52cf742e0 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver-histogram.yaml @@ -0,0 +1,38 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver-histogram.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-apiserver-histogram-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kube-apiserver-histogram.rules + rules: + - expr: | + histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0 + labels: + quantile: "0.99" + verb: read + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0 + labels: + quantile: "0.99" + verb: write + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, 
sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver.yaml new file mode 100644 index 000000000..d0d2bbbbf --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-apiserver.yaml @@ -0,0 +1,342 @@ +#https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-apiserver-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kube-apiserver-burnrate.rules + rules: + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1d])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1d])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[1d])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d])) + labels: + verb: read + record: apiserver_request:burnrate1d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[1h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[1h])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[1h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h])) + labels: + verb: read + record: apiserver_request:burnrate1h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[2h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[2h])) + + + sum by (cluster) 
(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[2h])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h])) + labels: + verb: read + record: apiserver_request:burnrate2h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[30m])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[30m])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[30m])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m])) + labels: + verb: read + record: apiserver_request:burnrate30m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[3d])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[3d])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[3d])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d])) + labels: + verb: read + record: apiserver_request:burnrate3d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[5m])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[5m])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[5m])) + ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: apiserver_request:burnrate5m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h])) + - + ( + ( + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="1"}[6h])) + or + vector(0) + ) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="5"}[6h])) + + + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="40"}[6h])) 
+ ) + ) + + + # errors + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h])) + labels: + verb: read + record: apiserver_request:burnrate6h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1d])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + labels: + verb: write + record: apiserver_request:burnrate1d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + labels: + verb: write + record: apiserver_request:burnrate1h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[2h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + labels: + verb: write + record: apiserver_request:burnrate2h + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[30m])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + labels: + verb: write + record: apiserver_request:burnrate30m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[3d])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + labels: + verb: write + record: apiserver_request:burnrate3d + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[5m])) + ) + + + sum by (cluster) 
(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: apiserver_request:burnrate5m + - expr: | + ( + ( + # too slow + sum by (cluster) (rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + - + sum by (cluster) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[6h])) + ) + + + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h])) + ) + / + sum by (cluster) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + labels: + verb: write + record: apiserver_request:burnrate6h + - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: code_resource:apiserver_request_total:rate5m + - expr: sum by (cluster,code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: code_resource:apiserver_request_total:rate5m + - expr: histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0 + labels: + quantile: '0.99' + verb: read + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum by (cluster, le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0 + labels: + quantile: '0.99' + verb: write + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: '0.99' + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: '0.9' + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: '0.5' + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-general.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-general.yaml new file mode 100644 index 000000000..fae34b09b --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-general.yaml @@ -0,0 +1,15 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-general + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kube-prometheus-general.rules + rules: + - expr: 
count without(instance, pod, node) (up == 1) + record: count:up1 + - expr: count without(instance, pod, node) (up == 0) + record: count:up0 \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-node-recording.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-node-recording.yaml new file mode 100644 index 000000000..ceb7ebe31 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-prometheus-node-recording.yaml @@ -0,0 +1,27 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-prometheus-node-recording-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kube-prometheus-node-recording.rules + rules: + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) + BY (instance) + record: instance:node_cpu:rate:sum + - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) + record: instance:node_network_receive_bytes:rate:sum + - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) + record: instance:node_network_transmit_bytes:rate:sum + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) + WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) + BY (instance, cpu)) BY (instance) + record: instance:node_cpu:ratio + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) + record: cluster:node_cpu:sum_rate5m + - expr: cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) + BY (instance, cpu)) + record: cluster:node_cpu:ratio diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-scheduler.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-scheduler.yaml new file mode 100644 index 000000000..55efe506d --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kube-scheduler.yaml @@ -0,0 +1,56 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kube-scheduler-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kube-scheduler.rules + rules: + - expr: | + histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.99" + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) 
+ labels: + quantile: "0.9" + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.9" + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) + labels: + quantile: "0.5" + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kubelet.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kubelet.yaml new file mode 100644 index 000000000..434967b9f --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kubelet.yaml @@ -0,0 +1,26 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/kubelet.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: kubelet-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kubelet.rules + rules: + - expr: | + histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: "0.99" + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: "0.9" + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: | + histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: "0.5" + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kubernetes-monitoring-alertingrules.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kubernetes-monitoring-alertingrules.yaml new file mode 100644 index 000000000..5ce306659 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kubernetes-monitoring-alertingrules.yaml @@ -0,0 +1,945 @@ +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule 
+metadata: + name: kubernetes-monitoring-alertingrules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: kube-state-metrics + rules: + - alert: KubeStateMetricsListErrors + annotations: + description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. + summary: kube-state-metrics is experiencing errors in list operations. + expr: | + (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) + / + sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m]))) + > 0.01 + for: 15m + labels: + severity: critical + - alert: KubeStateMetricsWatchErrors + annotations: + description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. + summary: kube-state-metrics is experiencing errors in watch operations. + expr: | + (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) + / + sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m]))) + > 0.01 + for: 15m + labels: + severity: critical + - name: kubernetes-apps + rules: + - alert: KubePodCrashLooping + annotations: + description: Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is restarting {{ printf "%.2f" $value }} times / 10 minutes. + summary: Pod is crash looping. + expr: | + rate(kube_pod_container_status_restarts_total{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[10m]) * 60 * 5 > 0 + for: 15m + labels: + severity: warning + - alert: KubePodNotReady + annotations: + description: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 15 minutes. + summary: Pod has been in a non-ready state for more than 15 minutes. + expr: | + sum by (namespace, pod) ( + max by(namespace, pod) ( + kube_pod_status_phase{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", phase=~"Pending|Unknown"} + ) * on(namespace, pod) group_left(owner_kind) topk by(namespace, pod) ( + 1, max by(namespace, pod, owner_kind) (kube_pod_owner{owner_kind!="Job"}) + ) + ) > 0 + for: 15m + labels: + severity: warning + - alert: KubeDeploymentGenerationMismatch + annotations: + description: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back. + summary: Deployment generation mismatch due to possible roll-back + expr: | + kube_deployment_status_observed_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_deployment_metadata_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetReplicasMismatch + annotations: + description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes. + summary: Deployment has not matched the expected number of replicas. 
+ expr: | + ( + kube_statefulset_status_replicas_ready{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_statefulset_status_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) and ( + changes(kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[10m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetGenerationMismatch + annotations: + description: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back. + summary: StatefulSet generation mismatch due to possible roll-back + expr: | + kube_statefulset_status_observed_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_statefulset_metadata_generation{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + for: 15m + labels: + severity: warning + - alert: KubeStatefulSetUpdateNotRolledOut + annotations: + description: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out. + summary: StatefulSet update has not been rolled out. + expr: | + ( + max without (revision) ( + kube_statefulset_status_current_revision{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + unless + kube_statefulset_status_update_revision{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) + * + ( + kube_statefulset_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) + ) and ( + changes(kube_statefulset_status_replicas_updated{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning + - alert: KubeDaemonSetRolloutStuck + annotations: + description: DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 30 minutes. + summary: DaemonSet rollout is stuck. + expr: | + ( + ( + kube_daemonset_status_current_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) or ( + kube_daemonset_status_number_misscheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + 0 + ) or ( + kube_daemonset_updated_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) or ( + kube_daemonset_status_number_available{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + ) + ) and ( + changes(kube_daemonset_updated_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}[5m]) + == + 0 + ) + for: 30m + labels: + severity: warning + - alert: KubeContainerWaiting + annotations: + description: Pod {{ $labels.namespace }}/{{ $labels.pod }} container {{ $labels.container}} has been in waiting state for longer than 1 hour. 
+ summary: Pod container waiting longer than 1 hour + expr: | + sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) > 0 + for: 1h + labels: + severity: warning + - alert: KubeDaemonSetNotScheduled + annotations: + description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.' + summary: DaemonSet pods are not scheduled. + expr: | + kube_daemonset_status_desired_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + - + kube_daemonset_status_current_number_scheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 10m + labels: + severity: warning + - alert: KubeDaemonSetMisScheduled + annotations: + description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run.' + summary: DaemonSet pods are misscheduled. + expr: | + kube_daemonset_status_number_misscheduled{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 15m + labels: + severity: warning + - alert: KubeJobCompletion + annotations: + description: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than 12 hours to complete. + summary: Job did not complete in time + expr: | + kube_job_spec_completions{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} - kube_job_status_succeeded{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 12h + labels: + severity: warning + - alert: KubeJobFailed + annotations: + description: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. Removing failed job after investigation should clear this alert. + summary: Job failed to complete. + expr: | + kube_job_failed{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 15m + labels: + severity: warning + - alert: KubeHpaReplicasMismatch + annotations: + description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has not matched the desired number of replicas for longer than 15 minutes. + summary: HPA has not matched descired number of replicas. + expr: | + (kube_hpa_status_desired_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + != + kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) + and + (kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + > + kube_hpa_spec_min_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) + and + (kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + < + kube_hpa_spec_max_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"}) + and + changes(kube_hpa_status_current_replicas[15m]) == 0 + for: 15m + labels: + severity: warning + - alert: KubeHpaMaxedOut + annotations: + description: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has been running at max replicas for longer than 15 minutes. 
+ summary: HPA is running at max replicas + expr: | + kube_hpa_status_current_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + == + kube_hpa_spec_max_replicas{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} + for: 15m + labels: + severity: warning + - name: kubernetes-resources + rules: + - alert: KubeCPUOvercommit + annotations: + description: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. + summary: Cluster has overcommitted CPU resource requests. + expr: | + sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) + / + sum(kube_node_status_allocatable{resource="cpu"}) + > + (count(kube_node_status_allocatable{resource="cpu"}) -1) / count(kube_node_status_allocatable{resource="cpu"}) + for: 5m + labels: + severity: warning + - alert: KubeMemoryOvercommit + annotations: + description: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. + summary: Cluster has overcommitted memory resource requests. + expr: | + sum(namespace_memory:kube_pod_container_resource_requests_bytes:sum{}) + / + sum(kube_node_status_allocatable{resource="memory"}) + > + (count(kube_node_status_allocatable{resource="memory"})-1) + / + count(kube_node_status_allocatable{resource="memory"}) + for: 5m + labels: + severity: warning + - alert: KubeCPUQuotaOvercommit + annotations: + description: Cluster has overcommitted CPU resource requests for Namespaces. + summary: Cluster has overcommitted CPU resource requests. + expr: "sum(kube_resourcequota{namespace=~\"(kube-.*|default|logging)\",job=\"kube-state-metrics\", type=\"hard\", resource=\"cpu\"})\n /\nsum(kube_node_status_allocatable{resource=\"cpu\"}) \n > 1.5\n" + for: 5m + labels: + severity: warning + - alert: KubeMemoryQuotaOvercommit + annotations: + description: Cluster has overcommitted memory resource requests for Namespaces. + summary: Cluster has overcommitted memory resource requests. + expr: | + sum(kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard", resource="memory"}) + / + sum(kube_node_status_allocatable{resource="memory",job="kube-state-metrics"}) + > 1.5 + for: 5m + labels: + severity: warning + - alert: KubeQuotaAlmostFull + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. + summary: Namespace quota is going to be full. + expr: | + kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) + > 0.9 < 1 + for: 15m + labels: + severity: info + - alert: KubeQuotaFullyUsed + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. + summary: Namespace quota is fully used. + expr: | + kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) + == 1 + for: 15m + labels: + severity: info + - alert: KubeQuotaExceeded + annotations: + description: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. + summary: Namespace quota has exceeded the limits. 
+ expr: | + kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{namespace=~"(kube-.*|default|logging)",job="kube-state-metrics", type="hard"} > 0) + > 1 + for: 15m + labels: + severity: warning + - name: kubernetes-storage + rules: + # - alert: KubePersistentVolumeFillingUp + # annotations: + # description: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free. + # summary: PersistentVolume is filling up. + # expr: | + # kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # / + # kubelet_volume_stats_capacity_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # < 0.03 + # for: 1m + # labels: + # severity: critical + # - alert: KubePersistentVolumeFillingUp + # annotations: + # description: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available. + # summary: PersistentVolume is filling up. + # expr: | + # ( + # kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # / + # kubelet_volume_stats_capacity_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"} + # ) < 0.15 + # and + # predict_linear(kubelet_volume_stats_available_bytes{namespace=~"(kube-.*|default|logging)",job="kubelet", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 + # for: 1h + # labels: + # severity: warning + - alert: KubePersistentVolumeErrors + annotations: + description: The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase }}. + summary: PersistentVolume is having issues with provisioning. + expr: | + kube_persistentvolume_status_phase{phase=~"Failed|Pending",namespace=~"(kube-.*|default|logging)",job="kube-state-metrics"} > 0 + for: 5m + labels: + severity: critical + - name: kubernetes-system + rules: + - alert: KubeClientErrors + annotations: + description: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.' + summary: Kubernetes API server client is experiencing errors. + expr: | + (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) + / + sum(rate(rest_client_requests_total[5m])) by (instance, job)) + > 0.01 + for: 15m + labels: + severity: warning + - name: kube-apiserver-slos + rules: + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate1h) > (14.40 * 0.01000) + and + sum(apiserver_request:burnrate5m) > (14.40 * 0.01000) + for: 2m + labels: + long: 1h + severity: critical + short: 5m + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate6h) > (6.00 * 0.01000) + and + sum(apiserver_request:burnrate30m) > (6.00 * 0.01000) + for: 15m + labels: + long: 6h + severity: critical + short: 30m + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. 
+ summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate1d) > (3.00 * 0.01000) + and + sum(apiserver_request:burnrate2h) > (3.00 * 0.01000) + for: 1h + labels: + long: 1d + severity: warning + short: 2h + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + summary: The API server is burning too much error budget. + expr: | + sum(apiserver_request:burnrate3d) > (1.00 * 0.01000) + and + sum(apiserver_request:burnrate6h) > (1.00 * 0.01000) + for: 3h + labels: + long: 3d + severity: warning + short: 6h + - name: kubernetes-system-apiserver + rules: + - alert: AggregatedAPIErrors + annotations: + description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported errors. It has appeared unavailable {{ $value | humanize }} times averaged over the past 10m. + summary: An aggregated API has reported errors. + expr: | + sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[10m])) > 4 + labels: + severity: warning + - alert: AggregatedAPIDown + annotations: + description: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been only {{ $value | humanize }}% available over the last 10m. + summary: An aggregated API is down. + expr: | + (1 - max by(name, namespace)(avg_over_time(aggregator_unavailable_apiservice[10m]))) * 100 < 85 + for: 15m + labels: + severity: warning + - alert: KubeAPIDown + annotations: + description: KubeAPI has disappeared from Prometheus target discovery. + summary: Target disappeared from Prometheus target discovery. + expr: | + absent(up{job="apiserver"} == 1) + for: 15m + labels: + severity: critical + - alert: KubeAPITerminatedRequests + annotations: + description: The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. + summary: The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests. + expr: | + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) / ( sum(rate(apiserver_request_total{job="apiserver"}[10m])) + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) ) > 0.20 + for: 5m + labels: + severity: warning + - name: kubernetes-system-kubelet + rules: + - alert: KubeNodeNotReady + annotations: + description: '{{ $labels.node }} has been unready for more than 15 minutes.' + summary: Node is not ready. + expr: | + kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 + for: 15m + labels: + severity: warning + - alert: KubeNodeUnreachable + annotations: + description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.' + summary: Node is unreachable. + expr: | + (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1 + for: 15m + labels: + severity: warning + - alert: KubeletTooManyPods + annotations: + description: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity. + summary: Kubelet is running at capacity. 
+ expr: | + count by(node) ( + (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} == 1) * on(instance,pod,namespace,cluster) group_left(node) topk by(instance,pod,namespace,cluster) (1, kube_pod_info{job="kube-state-metrics"}) + ) + / + max by(node) ( + kube_node_status_capacity{job="kube-state-metrics",resource="pods"} != 1 + ) > 0.95 + for: 15m + labels: + severity: warning + - alert: KubeNodeReadinessFlapping + annotations: + description: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes. + summary: Node readiness status is flapping. + expr: | + sum(changes(kube_node_status_condition{status="true",condition="Ready"}[15m])) by (node) > 2 + for: 15m + labels: + severity: warning + - alert: KubeletPlegDurationHigh + annotations: + description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}. + summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist. + expr: | + node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10 + for: 5m + labels: + severity: warning + # - alert: KubeletPodStartUpLatencyHigh + # annotations: + # description: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}. + # summary: Kubelet Pod startup latency is too high. + # expr: | + # histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60 + # for: 15m + # labels: + # severity: warning + - alert: KubeletClientCertificateRenewalErrors + annotations: + description: Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes). + summary: Kubelet has failed to renew its client certificate. + expr: | + increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: KubeletServerCertificateRenewalErrors + annotations: + description: Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes). + summary: Kubelet has failed to renew its server certificate. + expr: | + increase(kubelet_server_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning + # - alert: KubeletDown + # annotations: + # description: Kubelet has disappeared from Prometheus target discovery. + # summary: Target disappeared from Prometheus target discovery. + # expr: | + # absent(up{job="kubelet", metrics_path="/metrics"} == 1) + # for: 15m + # labels: + # severity: critical + - name: node-exporter + rules: + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up. + summary: Filesystem is predicted to run out of space within the next 24 hours. 
+ expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 40 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast. + summary: Filesystem is predicted to run out of space within the next 4 hours. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 15 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. + summary: Filesystem has less than 5% space left. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. + summary: Filesystem has less than 3% space left. + expr: | + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up. + summary: Filesystem is predicted to run out of inodes within the next 24 hours. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 40 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast. + summary: Filesystem is predicted to run out of inodes within the next 4 hours. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 20 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. 
+ summary: Filesystem has less than 5% inodes left. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. + summary: Filesystem has less than 3% inodes left. + expr: | + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical + - alert: NodeNetworkReceiveErrs + annotations: + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' + summary: Network interface is reporting many receive errors. + expr: | + rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01 + for: 1h + labels: + severity: warning + - alert: NodeNetworkTransmitErrs + annotations: + description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' + summary: Network interface is reporting many transmit errors. + expr: | + rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01 + for: 1h + labels: + severity: warning + - alert: NodeHighNumberConntrackEntriesUsed + annotations: + description: '{{ $value | humanizePercentage }} of conntrack entries are used.' + summary: Number of conntrack are getting close to the limit. + expr: | + (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 + labels: + severity: warning + - alert: NodeTextFileCollectorScrapeError + annotations: + description: Node Exporter text file collector failed to scrape. + summary: Node Exporter text file collector failed to scrape. + expr: | + node_textfile_scrape_error{job="node-exporter"} == 1 + labels: + severity: warning + - alert: NodeClockSkewDetected + annotations: + description: Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure NTP is configured correctly on this host. + summary: Clock skew detected. + expr: | + ( + node_timex_offset_seconds > 0.05 + and + deriv(node_timex_offset_seconds[5m]) >= 0 + ) + or + ( + node_timex_offset_seconds < -0.05 + and + deriv(node_timex_offset_seconds[5m]) <= 0 + ) + for: 10m + labels: + severity: warning + - alert: NodeClockNotSynchronising + annotations: + description: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host. + summary: Clock not synchronising. + expr: | + min_over_time(node_timex_sync_status[5m]) == 0 + and + node_timex_maxerror_seconds >= 16 + for: 10m + labels: + severity: warning + - alert: NodeRAIDDegraded + annotations: + description: RAID array '{{ $labels.device }}' on {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically. 
+ summary: RAID Array is degraded + expr: | + node_md_disks_required - ignoring (state) (node_md_disks{state="active"}) > 0 + for: 15m + labels: + severity: critical + - alert: NodeRAIDDiskFailure + annotations: + description: At least one device in RAID array on {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap. + summary: Failed device in RAID array + expr: | + node_md_disks{state="failed"} > 0 + labels: + severity: warning + - name: prometheus ## prometheus + rules: + - alert: PrometheusBadConfig + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration. + summary: Failed Prometheus configuration reload. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(prometheus_config_last_reload_successful{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) == 0 + for: 10m + labels: + severity: critical + - alert: PrometheusNotificationQueueRunningFull + annotations: + description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full. + summary: Prometheus alert notification queue predicted to run full in less than 30m. + expr: | + # Without min_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + predict_linear(prometheus_notifications_queue_length{job=~"prometheus-k8s|prometheus-user-workload"}[5m], 60 * 30) + > + min_over_time(prometheus_notifications_queue_capacity{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + for: 15m + labels: + severity: warning + - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers + annotations: + description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.' + summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager. + expr: | + ( + rate(prometheus_notifications_errors_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + / + rate(prometheus_notifications_sent_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + * 100 + > 1 + for: 15m + labels: + severity: warning + - alert: PrometheusNotConnectedToAlertmanagers + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers. + summary: Prometheus is not connected to any Alertmanagers. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + max_over_time(prometheus_notifications_alertmanagers_discovered{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) < 1 + for: 10m + labels: + severity: warning + - alert: PrometheusTSDBReloadsFailing + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h. + summary: Prometheus has issues reloading blocks from disk. + expr: | + increase(prometheus_tsdb_reloads_failures_total{job=~"prometheus-k8s|prometheus-user-workload"}[3h]) > 0 + for: 4h + labels: + severity: warning + - alert: PrometheusTSDBCompactionsFailing + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h. 
+ summary: Prometheus has issues compacting blocks. + expr: | + increase(prometheus_tsdb_compactions_failed_total{job=~"prometheus-k8s|prometheus-user-workload"}[3h]) > 0 + for: 4h + labels: + severity: warning + - alert: PrometheusNotIngestingSamples + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples. + summary: Prometheus is not ingesting samples. + expr: | + ( + rate(prometheus_tsdb_head_samples_appended_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) <= 0 + and + ( + sum without(scrape_job) (prometheus_target_metadata_cache_entries{job=~"prometheus-k8s|prometheus-user-workload"}) > 0 + or + sum without(rule_group) (prometheus_rule_group_rules{job=~"prometheus-k8s|prometheus-user-workload"}) > 0 + ) + ) + for: 10m + labels: + severity: warning + - alert: PrometheusDuplicateTimestamps + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp. + summary: Prometheus is dropping samples with duplicate timestamps. + expr: | + rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 1h + labels: + severity: warning + - alert: PrometheusOutOfOrderTimestamps + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order. + summary: Prometheus drops samples with out-of-order timestamps. + expr: | + rate(prometheus_target_scrapes_sample_out_of_order_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 1h + labels: + severity: warning + - alert: PrometheusRemoteStorageFailures + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }} + summary: Prometheus fails to send samples to remote storage. + expr: | + ( + rate(prometheus_remote_storage_failed_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + / + ( + rate(prometheus_remote_storage_failed_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + + + rate(prometheus_remote_storage_succeeded_samples_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + ) + * 100 + > 1 + for: 15m + labels: + severity: critical + - alert: PrometheusRemoteWriteBehind + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}. + summary: Prometheus remote write is behind. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ ( + max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + - ignoring(remote_name, url) group_right + max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + > 120 + for: 15m + labels: + severity: critical + - alert: PrometheusRemoteWriteDesiredShards + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job=~"prometheus-k8s|prometheus-user-workload"}` $labels.instance | query | first | value }}. + summary: Prometheus remote write desired shards calculation wants to run more than configured max shards. + expr: | + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + max_over_time(prometheus_remote_storage_shards_desired{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + > + max_over_time(prometheus_remote_storage_shards_max{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) + ) + for: 15m + labels: + severity: warning + - alert: PrometheusRuleFailures + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m. + summary: Prometheus is failing rule evaluations. + expr: | + increase(prometheus_rule_evaluation_failures_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 15m + labels: + severity: critical + - alert: PrometheusMissingRuleEvaluations + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m. + summary: Prometheus is missing rule evaluations due to slow rule group evaluation. + expr: | + increase(prometheus_rule_group_iterations_missed_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusTargetLimitHit + annotations: + description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has dropped {{ printf "%.0f" $value }} targets because the number of targets exceeded the configured target_limit. + summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit. + expr: | + increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job=~"prometheus-k8s|prometheus-user-workload"}[5m]) > 0 + for: 15m + labels: + severity: warning + - alert: PrometheusErrorSendingAlertsToAnyAlertmanager + annotations: + description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.' + summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager. 
+ expr: | + min without (alertmanager) ( + rate(prometheus_notifications_errors_total{job=~"prometheus-k8s|prometheus-user-workload",alertmanager!~``}[5m]) + / + rate(prometheus_notifications_sent_total{job=~"prometheus-k8s|prometheus-user-workload",alertmanager!~``}[5m]) + ) + * 100 + > 3 + for: 15m + labels: + severity: critical diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/kustomization.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kustomization.yaml new file mode 100644 index 000000000..6f1feeda9 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- k8s.yaml +- kube-apiserver-availability.yaml +- kube-apiserver-histogram.yaml +- kube-apiserver.yaml +- kube-prometheus-node-recording.yaml +- kube-scheduler.yaml +- kubelet.yaml +- kubernetes-monitoring-alertingrules.yaml +- node.yaml +- node-exporter.yaml + diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/node-exporter.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/node-exporter.yaml new file mode 100644 index 000000000..4c9b03ec1 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/node-exporter.yaml @@ -0,0 +1,67 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/node-exporter.rules.yaml +# changes: change 5m to 1m to be compatible with old vesion grafana dashboards + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: node-exporter-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: node-exporter.rules + rules: + - expr: | + count without (cpu) ( + count without (mode) ( + node_cpu_seconds_total{job="node-exporter"} + ) + ) + record: instance:node_num_cpu:sum + - expr: | + 1 - avg without (cpu, mode) ( + rate(node_cpu_seconds_total{job="node-exporter", mode="idle"}[1m]) + ) + record: instance:node_cpu_utilisation:rate1m + - expr: | + ( + node_load1{job="node-exporter"} + / + instance:node_num_cpu:sum{job="node-exporter"} + ) + record: instance:node_load1_per_cpu:ratio + - expr: | + 1 - ( + node_memory_MemAvailable_bytes{job="node-exporter"} + / + node_memory_MemTotal_bytes{job="node-exporter"} + ) + record: instance:node_memory_utilisation:ratio + - expr: | + rate(node_vmstat_pgmajfault{job="node-exporter"}[1m]) + record: instance:node_vmstat_pgmajfault:rate1m + - expr: | + rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) + record: instance_device:node_disk_io_time_seconds:rate1m + - expr: | + rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) + record: instance_device:node_disk_io_time_weighted_seconds:rate1m + - expr: | + sum without (device) ( + rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_receive_bytes_excluding_lo:rate1m + - expr: | + sum without (device) ( + rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_transmit_bytes_excluding_lo:rate1m + - expr: | + sum without (device) ( + rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_receive_drop_excluding_lo:rate1m + - expr: | + sum without (device) ( + 
rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_transmit_drop_excluding_lo:rate1m \ No newline at end of file diff --git a/operators/endpointmetrics/manifests/prometheus/prometheusrules/node.yaml b/operators/endpointmetrics/manifests/prometheus/prometheusrules/node.yaml new file mode 100644 index 000000000..a35c08ee8 --- /dev/null +++ b/operators/endpointmetrics/manifests/prometheus/prometheusrules/node.yaml @@ -0,0 +1,35 @@ +# https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/templates/prometheus/rules-1.14/node.rules.yaml + +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: node-rules + namespace: open-cluster-management-addon-observability +spec: + groups: + - name: node.rules + rules: + - expr: | + topk by(namespace, pod) (1, + max by (node, namespace, pod) ( + label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)") + )) + record: 'node_namespace_pod:kube_pod_info:' + - expr: | + count by (cluster, node) (sum by (node, cpu) ( + node_cpu_seconds_total{job="node-exporter"} + * on (namespace, pod) group_left(node) + topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:) + )) + record: node:node_num_cpu:sum + - expr: | + sum( + node_memory_MemAvailable_bytes{job="node-exporter"} or + ( + node_memory_Buffers_bytes{job="node-exporter"} + + node_memory_Cached_bytes{job="node-exporter"} + + node_memory_MemFree_bytes{job="node-exporter"} + + node_memory_Slab_bytes{job="node-exporter"} + ) + ) by (cluster) + record: :node_memory_MemAvailable_bytes:sum diff --git a/operators/endpointmetrics/pkg/rendering/renderer.go b/operators/endpointmetrics/pkg/rendering/renderer.go index 96ec346eb..86c5f842e 100644 --- a/operators/endpointmetrics/pkg/rendering/renderer.go +++ b/operators/endpointmetrics/pkg/rendering/renderer.go @@ -9,6 +9,7 @@ import ( "os" "strings" + prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -48,7 +49,11 @@ var ( var Images = map[string]string{} -func Render(r *rendererutil.Renderer, c runtimeclient.Client, hubInfo *operatorconfig.HubInfo) ([]*unstructured.Unstructured, error) { +func Render( + r *rendererutil.Renderer, + c runtimeclient.Client, + hubInfo *operatorconfig.HubInfo, +) ([]*unstructured.Unstructured, error) { genericTemplates, err := templates.GetTemplates(templatesutil.GetTemplateRenderer()) if err != nil { @@ -80,20 +85,46 @@ func Render(r *rendererutil.Renderer, c runtimeclient.Client, hubInfo *operatorc } resources[idx].Object = unstructuredObj } - if resources[idx].GetKind() == "StatefulSet" && resources[idx].GetName() == "prometheus-k8s" { + if resources[idx].GetKind() == "Deployment" && resources[idx].GetName() == "prometheus-operator" { obj := util.GetK8sObj(resources[idx].GetKind()) err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj) if err != nil { return nil, err } - sts := obj.(*v1.StatefulSet) - spec := &sts.Spec.Template.Spec - spec.Containers[0].Image = Images[operatorconfig.PrometheusKey] - spec.Containers[1].Image = Images[operatorconfig.KubeRbacProxyKey] - spec.Containers[2].Image = Images[operatorconfig.ConfigmapReloaderKey] + dep := obj.(*v1.Deployment) + spec := &dep.Spec.Template.Spec + spec.Containers[0].Image = Images[operatorconfig.PrometheusOperatorKey] + spec.ImagePullSecrets = 
[]corev1.LocalObjectReference{ + {Name: os.Getenv(operatorconfig.PullSecret)}, + } + args := spec.Containers[0].Args + for idx := range args { + args[idx] = strings.Replace(args[idx], "{{NAMESPACE}}", namespace, 1) + args[idx] = strings.Replace(args[idx], "{{PROM_CONFIGMAP_RELOADER_IMG}}", Images[operatorconfig.PrometheusConfigmapReloaderKey], 1) + } + spec.Containers[0].Args = args + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + resources[idx].Object = unstructuredObj + } + if resources[idx].GetKind() == "Prometheus" && resources[idx].GetName() == "k8s" { + obj := util.GetK8sObj(resources[idx].GetKind()) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj) + if err != nil { + return nil, err + } + prom := obj.(*prometheusv1.Prometheus) + spec := &prom.Spec + image := Images[operatorconfig.PrometheusKey] + spec.Image = &image + spec.Containers[0].Image = Images[operatorconfig.KubeRbacProxyKey] spec.ImagePullSecrets = []corev1.LocalObjectReference{ {Name: os.Getenv(operatorconfig.PullSecret)}, } + spec.ExternalLabels["cluster"] = hubInfo.ClusterName unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) if err != nil { @@ -121,22 +152,21 @@ func Render(r *rendererutil.Renderer, c runtimeclient.Client, hubInfo *operatorc } resources[idx].Object = unstructuredObj } - if resources[idx].GetKind() == "ConfigMap" && resources[idx].GetName() == "prometheus-k8s-config" { + if resources[idx].GetKind() == "Secret" && resources[idx].GetName() == "prometheus-scrape-targets " { obj := util.GetK8sObj(resources[idx].GetKind()) err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj) if err != nil { return nil, err } - cm := obj.(*corev1.ConfigMap) - promConfig, exists := cm.Data["prometheus.yaml"] + s := obj.(*corev1.Secret) + promConfig, exists := s.StringData["scrape-targets.yaml"] if !exists { - return nil, fmt.Errorf("no key 'prometheus.yaml' found in the configmap: %s/%s", cm.GetNamespace(), cm.GetName()) + return nil, fmt.Errorf( + "no key 'scrape-targets.yaml' found in the secret: %s/%s", + s.GetNamespace(), + s.GetName(), + ) } - // replace the hub alertmanager address - hubAmEp := strings.TrimLeft(hubInfo.AlertmanagerEndpoint, "https://") - promConfig = strings.ReplaceAll(promConfig, "_ALERTMANAGER_ENDPOINT_", hubAmEp) - // replace the cluster ID with clusterName in hubInfo - promConfig = strings.ReplaceAll(promConfig, "_CLUSTERID_", hubInfo.ClusterName) // replace the disabled metrics disabledMetricsSt, err := getDisabledMetrics(c) @@ -144,8 +174,34 @@ func Render(r *rendererutil.Renderer, c runtimeclient.Client, hubInfo *operatorc return nil, err } if disabledMetricsSt != "" { - cm.Data["prometheus.yaml"] = strings.ReplaceAll(promConfig, "_DISABLED_METRICS_", disabledMetricsSt) + s.StringData["scrape-targets.yaml"] = strings.ReplaceAll(promConfig, "_DISABLED_METRICS_", disabledMetricsSt) + } + + unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + resources[idx].Object = unstructuredObj + } + if resources[idx].GetKind() == "Secret" && resources[idx].GetName() == "prometheus-alertmanager" { + obj := util.GetK8sObj(resources[idx].GetKind()) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(resources[idx].Object, obj) + if err != nil { + return nil, err } + s := obj.(*corev1.Secret) + amConfig, exists := s.StringData["alertmanager.yaml"] + if 
!exists { + return nil, fmt.Errorf( + "no key 'alertmanager.yaml' found in the configmap: %s/%s", + s.GetNamespace(), + s.GetName(), + ) + } + // replace the hub alertmanager address + hubAmEp := strings.TrimLeft(hubInfo.AlertmanagerEndpoint, "https://") + amConfig = strings.ReplaceAll(amConfig, "_ALERTMANAGER_ENDPOINT_", hubAmEp) + s.StringData["alertmanager.yaml"] = amConfig unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) if err != nil { diff --git a/operators/endpointmetrics/pkg/rendering/templates/templates.go b/operators/endpointmetrics/pkg/rendering/templates/templates.go index 6347431b4..4d470904d 100644 --- a/operators/endpointmetrics/pkg/rendering/templates/templates.go +++ b/operators/endpointmetrics/pkg/rendering/templates/templates.go @@ -15,10 +15,20 @@ func GetTemplates(r *templates.TemplateRenderer) ([]*resource.Resource, error) { // resourceList contains all kustomize resources resourceList := []*resource.Resource{} + // add prometheus template + if err := r.AddTemplateFromPath(r.GetTemplatesPath()+"/prometheus/crd", &resourceList); err != nil { + return resourceList, err + } + // add prometheus template if err := r.AddTemplateFromPath(r.GetTemplatesPath()+"/prometheus", &resourceList); err != nil { return resourceList, err } + // add prometheus template + if err := r.AddTemplateFromPath(r.GetTemplatesPath()+"/prometheus/prometheusrules", &resourceList); err != nil { + return resourceList, err + } + return resourceList, nil } diff --git a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go index 9055631c4..6e4f428dd 100644 --- a/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go +++ b/operators/multiclusterobservability/api/shared/multiclusterobservability_shared.go @@ -33,12 +33,15 @@ type ObservabilityAddonSpec struct { type PreConfiguredStorage struct { // The key of the secret to select from. Must be a valid secret key. - // Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + // Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. // +required Key string `json:"key"` // Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // +required Name string `json:"name"` + // TLS secret contains the custom certificate for the object store + // +optional + TLSSecretName string `json:"tlsSecretName,omitempty"` } // Condition is from metav1.Condition. 
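The renderer change above fills the hub Alertmanager address into the rendered alertmanager.yaml secret by stripping the URL scheme and replacing the _ALERTMANAGER_ENDPOINT_ placeholder. A minimal, self-contained sketch of that substitution follows; note that strings.TrimLeft interprets its second argument as a character set (any leading 'h', 't', 'p', 's', ':' or '/' would be removed), so the sketch uses strings.TrimPrefix instead. Everything outside the two placeholder strings is illustrative, not code from this patch.

// Sketch only: substitute the hub Alertmanager endpoint into a rendered
// Alertmanager configuration, assuming an endpoint such as
// "https://alertmanager.example.com".
package main

import (
	"fmt"
	"strings"
)

func substituteAlertmanagerEndpoint(amConfig, endpoint string) string {
	// TrimPrefix removes only the literal "https://" scheme; TrimLeft with the
	// same argument would treat it as a set of characters and could also strip
	// leading 'h', 't', 'p' or 's' from the hostname itself.
	hubAmEp := strings.TrimPrefix(endpoint, "https://")
	return strings.ReplaceAll(amConfig, "_ALERTMANAGER_ENDPOINT_", hubAmEp)
}

func main() {
	tpl := "static_configs:\n- targets: [\"_ALERTMANAGER_ENDPOINT_\"]\n"
	fmt.Print(substituteAlertmanagerEndpoint(tpl, "https://alertmanager.example.com"))
}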
diff --git a/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go b/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go index f684dd847..b71a95b5a 100644 --- a/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go +++ b/operators/multiclusterobservability/api/v1beta1/multiclusterobservability_types.go @@ -45,7 +45,7 @@ type MultiClusterObservabilitySpec struct { // Pull policy of the MultiClusterObservability images // +optional - // +kubebuilder:default:=Always + // +kubebuilder:default:=IfNotPresent ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"` // Pull secret of the MultiClusterObservability images diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go index c5213e970..c04e28cc4 100644 --- a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_types.go @@ -67,19 +67,19 @@ type AdvancedConfig struct { QueryFrontend *CommonSpec `json:"queryFrontend,omitempty"` // spec for thanos-query // +optional - Query *CommonSpec `json:"query,omitempty"` + Query *QuerySpec `json:"query,omitempty"` // spec for thanos-compact // +optional Compact *CompactSpec `json:"compact,omitempty"` // spec for thanos-receiver // +optional - Receive *CommonSpec `json:"receive,omitempty"` + Receive *ReceiveSpec `json:"receive,omitempty"` // spec for thanos-rule // +optional Rule *RuleSpec `json:"rule,omitempty"` // spec for thanos-store-shard // +optional - Store *CommonSpec `json:"store,omitempty"` + Store *StoreSpec `json:"store,omitempty"` } type CommonSpec struct { @@ -91,12 +91,43 @@ type CommonSpec struct { Replicas *int32 `json:"replicas,omitempty"` } +// Thanos Query Spec +type QuerySpec struct { + // Annotations is an unstructured key value map stored with a service account + // +optional + ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` + + CommonSpec `json:",inline"` +} + +// Thanos Receive Spec +type ReceiveSpec struct { + // Annotations is an unstructured key value map stored with a service account + // +optional + ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` + + CommonSpec `json:",inline"` +} + +// Thanos Store Spec +type StoreSpec struct { + // Annotations is an unstructured key value map stored with a service account + // +optional + ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` + + CommonSpec `json:",inline"` +} + // Thanos Rule Spec type RuleSpec struct { // Evaluation interval // +optional EvalInterval string `json:"evalInterval,omitempty"` + // Annotations is an unstructured key value map stored with a service account + // +optional + ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` + CommonSpec `json:",inline"` } @@ -105,6 +136,9 @@ type CompactSpec struct { // Compute Resources required by the compact. // +optional Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // Annotations is an unstructured key value map stored with a service account + // +optional + ServiceAccountAnnotations map[string]string `json:"serviceAccountAnnotations,omitempty"` } // CacheConfig is the spec of memcached. 
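The ServiceAccountAnnotations fields added to the Query, Receive, Store, Rule and Compact specs carry arbitrary annotations for the corresponding component service accounts. A short sketch of populating them through the v1beta2 API types follows; mcov1beta2 is assumed to alias the api/v1beta2 package shown in this patch, and the IRSA-style annotation key and role ARN are purely illustrative values.

// Sketch only: set ServiceAccountAnnotations on the Thanos component specs
// introduced above. mcov1beta2 stands for the
// operators/multiclusterobservability/api/v1beta2 package; the import path is
// omitted here because the module path is not shown in this patch.
func buildAdvancedConfig() *mcov1beta2.AdvancedConfig {
	sa := map[string]string{
		// Illustrative annotation (e.g. binding an AWS IAM role to the
		// service account); any key/value pair is accepted by the API.
		"eks.amazonaws.com/role-arn": "arn:aws:iam::111122223333:role/observability-object-store",
	}
	return &mcov1beta2.AdvancedConfig{
		Query:   &mcov1beta2.QuerySpec{ServiceAccountAnnotations: sa},
		Receive: &mcov1beta2.ReceiveSpec{ServiceAccountAnnotations: sa},
		Store:   &mcov1beta2.StoreSpec{ServiceAccountAnnotations: sa},
		Rule:    &mcov1beta2.RuleSpec{ServiceAccountAnnotations: sa},
		Compact: &mcov1beta2.CompactSpec{ServiceAccountAnnotations: sa},
	}
}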
@@ -155,6 +189,9 @@ type StorageConfig struct { // Object store config secret for metrics // +required MetricObjectStorage *observabilityshared.PreConfiguredStorage `json:"metricObjectStorage"` + // WriteStorage storage config secret list for metrics + // +optional + WriteStorage []*observabilityshared.PreConfiguredStorage `json:"writeStorage,omitempty"` // Specify the storageClass Stateful Sets. This storage class will also // be used for Object Storage if MetricObjectStorage was configured for // the system to create the storage. diff --git a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go index 8b5c54a2d..4588e07f5 100644 --- a/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go +++ b/operators/multiclusterobservability/api/v1beta2/multiclusterobservability_webhook.go @@ -88,7 +88,8 @@ func (mco *MultiClusterObservability) validateMultiClusterObservabilityName() *f } // validateMultiClusterObservabilitySpec validates the spec of the MultiClusterObservability CR. -// notice that some fields are declaratively validated by OpenAPI schema with `// +kubebuilder:validation` in the type definition. +// notice that some fields are declaratively validated by OpenAPI schema with `// +kubebuilder:validation` in the type +// definition. func (mco *MultiClusterObservability) validateMultiClusterObservabilitySpec() *field.Error { // The field helpers from the kubernetes API machinery help us return nicely structured validation errors. return nil @@ -99,8 +100,11 @@ func (mco *MultiClusterObservability) validateUpdateMultiClusterObservabilitySpe return mco.validateUpdateMultiClusterObservabilityStorageSize(old) } -// validateUpdateMultiClusterObservabilityStorageSize validates the update of storage size in the MultiClusterObservability CR. -func (mco *MultiClusterObservability) validateUpdateMultiClusterObservabilityStorageSize(old runtime.Object) field.ErrorList { +// validateUpdateMultiClusterObservabilityStorageSize validates the update of storage size in the +// MultiClusterObservability CR. +func (mco *MultiClusterObservability) validateUpdateMultiClusterObservabilityStorageSize( + old runtime.Object, +) field.ErrorList { var errs field.ErrorList oldMCO := old.(*MultiClusterObservability) kubeClient, err := createOrGetKubeClient() @@ -129,19 +133,34 @@ func (mco *MultiClusterObservability) validateUpdateMultiClusterObservabilitySto storageConfigFieldPath := field.NewPath("spec").Child("storageConfig") storageForbiddenResize := "is forbidden to update." 
if mcoOldConfig.AlertmanagerStorageSize != mcoNewConfig.AlertmanagerStorageSize { - errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("alertmanagerStorageSize"), storageForbiddenResize)) + errs = append( + errs, + field.Forbidden(storageConfigFieldPath.Child("alertmanagerStorageSize"), storageForbiddenResize), + ) } if mcoOldConfig.CompactStorageSize != mcoNewConfig.CompactStorageSize { - errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("compactStorageSize"), storageForbiddenResize)) + errs = append( + errs, + field.Forbidden(storageConfigFieldPath.Child("compactStorageSize"), storageForbiddenResize), + ) } if mcoOldConfig.ReceiveStorageSize != mcoNewConfig.ReceiveStorageSize { - errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("receiveStorageSize"), storageForbiddenResize)) + errs = append( + errs, + field.Forbidden(storageConfigFieldPath.Child("receiveStorageSize"), storageForbiddenResize), + ) } if mcoOldConfig.StoreStorageSize != mcoNewConfig.StoreStorageSize { - errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("storeStorageSize"), storageForbiddenResize)) + errs = append( + errs, + field.Forbidden(storageConfigFieldPath.Child("storeStorageSize"), storageForbiddenResize), + ) } if mcoOldConfig.RuleStorageSize != mcoNewConfig.RuleStorageSize { - errs = append(errs, field.Forbidden(storageConfigFieldPath.Child("ruleStorageSize"), storageForbiddenResize)) + errs = append( + errs, + field.Forbidden(storageConfigFieldPath.Child("ruleStorageSize"), storageForbiddenResize), + ) } return errs } @@ -162,7 +181,10 @@ func createOrGetKubeClient() (kubernetes.Interface, error) { } // getSelectedStorageClassForMultiClusterObservability get secected for the MultiClusterObservability CR -func getSelectedStorageClassForMultiClusterObservability(c kubernetes.Interface, mco *MultiClusterObservability) (string, error) { +func getSelectedStorageClassForMultiClusterObservability( + c kubernetes.Interface, + mco *MultiClusterObservability, +) (string, error) { scInCR := "" if mco.Spec.StorageConfig != nil { scInCR = mco.Spec.StorageConfig.StorageClass diff --git a/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go b/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go index eca667ab7..09761a165 100644 --- a/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go +++ b/operators/multiclusterobservability/api/v1beta2/zz_generated.deepcopy.go @@ -72,7 +72,7 @@ func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) { } if in.Query != nil { in, out := &in.Query, &out.Query - *out = new(CommonSpec) + *out = new(QuerySpec) (*in).DeepCopyInto(*out) } if in.Compact != nil { @@ -82,7 +82,7 @@ func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) { } if in.Receive != nil { in, out := &in.Receive, &out.Receive - *out = new(CommonSpec) + *out = new(ReceiveSpec) (*in).DeepCopyInto(*out) } if in.Rule != nil { @@ -92,7 +92,7 @@ func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) { } if in.Store != nil { in, out := &in.Store, &out.Store - *out = new(CommonSpec) + *out = new(StoreSpec) (*in).DeepCopyInto(*out) } } @@ -166,6 +166,13 @@ func (in *CompactSpec) DeepCopyInto(out *CompactSpec) { *out = new(v1.ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.ServiceAccountAnnotations != nil { + in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new CompactSpec. @@ -303,6 +310,52 @@ func (in *MultiClusterObservabilityStatus) DeepCopy() *MultiClusterObservability return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuerySpec) DeepCopyInto(out *QuerySpec) { + *out = *in + if in.ServiceAccountAnnotations != nil { + in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySpec. +func (in *QuerySpec) DeepCopy() *QuerySpec { + if in == nil { + return nil + } + out := new(QuerySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReceiveSpec) DeepCopyInto(out *ReceiveSpec) { + *out = *in + if in.ServiceAccountAnnotations != nil { + in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReceiveSpec. +func (in *ReceiveSpec) DeepCopy() *ReceiveSpec { + if in == nil { + return nil + } + out := new(ReceiveSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RetentionConfig) DeepCopyInto(out *RetentionConfig) { *out = *in @@ -321,6 +374,13 @@ func (in *RetentionConfig) DeepCopy() *RetentionConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RuleSpec) DeepCopyInto(out *RuleSpec) { *out = *in + if in.ServiceAccountAnnotations != nil { + in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } in.CommonSpec.DeepCopyInto(&out.CommonSpec) } @@ -342,6 +402,17 @@ func (in *StorageConfig) DeepCopyInto(out *StorageConfig) { *out = new(shared.PreConfiguredStorage) **out = **in } + if in.WriteStorage != nil { + in, out := &in.WriteStorage, &out.WriteStorage + *out = make([]*shared.PreConfiguredStorage, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(shared.PreConfiguredStorage) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfig. @@ -353,3 +424,26 @@ func (in *StorageConfig) DeepCopy() *StorageConfig { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreSpec) DeepCopyInto(out *StoreSpec) { + *out = *in + if in.ServiceAccountAnnotations != nil { + in, out := &in.ServiceAccountAnnotations, &out.ServiceAccountAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreSpec. 
+func (in *StoreSpec) DeepCopy() *StoreSpec { + if in == nil { + return nil + } + out := new(StoreSpec) + in.DeepCopyInto(out) + return out +} diff --git a/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml b/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml index 97b676551..6f9a240b1 100644 --- a/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml +++ b/operators/multiclusterobservability/bundle/manifests/core.observatorium.io_observatoria.yaml @@ -374,9 +374,29 @@ spec: api: description: API properties: + additionalWriteEndpoints: + description: AdditionalWriteEndpoints is a slice of additional write endpoint for the Observatorium API. + properties: + endpointsConfigSecret: + description: Secret name for the endpoints configuration + type: string + mountPath: + description: Mount path for the secrets + type: string + mountSecrets: + description: Secret list to be mounted + items: + type: string + type: array + required: + - endpointsConfigSecret + type: object image: description: API image type: string + imagePullPolicy: + description: API image pull policy + type: string rbac: description: RBAC is an RBAC configuration for the Observatorium API. properties: @@ -583,6 +603,9 @@ spec: image: description: Loki image type: string + imagePullPolicy: + description: Loki image pull policy + type: string replicas: additionalProperties: format: int32 @@ -729,6 +752,12 @@ spec: name: description: Object Store Config Secret Name type: string + tlsSecretMountPath: + description: TLS secret mount path in thanos store/ruler/compact/receiver + type: string + tlsSecretName: + description: TLS secret contains the custom certificate for the object store + type: string required: - key - name @@ -870,6 +899,11 @@ spec: retentionResolutionRaw: description: RetentionResolutionRaw type: string + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -974,6 +1008,9 @@ spec: image: description: Thanos image type: string + imagePullPolicy: + description: Thanos image pull policy + type: string query: description: Query properties: @@ -1006,6 +1043,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -1023,6 +1065,9 @@ spec: exporterImage: description: Memcached Prometheus Exporter image type: string + exporterImagePullPolicy: + description: Memcached Prometheus Exporter image image pull policy + type: string exporterResources: description: Compute Resources required by this container. 
properties: @@ -1051,6 +1096,9 @@ spec: image: description: Memcached image type: string + imagePullPolicy: + description: Memcached image pull policy + type: string maxItemSize: description: 'Max item size (default: 1m, min: 1k, max: 1024m)' type: string @@ -1127,6 +1175,9 @@ spec: image: description: Receive Controller image type: string + imagePullPolicy: + description: Receive image pull policy + type: string resources: description: Compute Resources required by this container. properties: @@ -1192,6 +1243,11 @@ spec: retention: description: How long to retain raw samples on local storage type: string + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -1343,6 +1399,9 @@ spec: reloaderImage: description: ReloaderImage is an image of configmap reloader type: string + reloaderImagePullPolicy: + description: ReloaderImage image pull policy + type: string reloaderResources: description: Compute Resources required by this container. properties: @@ -1409,6 +1468,11 @@ spec: - name type: object type: array + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -1520,6 +1584,9 @@ spec: exporterImage: description: Memcached Prometheus Exporter image type: string + exporterImagePullPolicy: + description: Memcached Prometheus Exporter image image pull policy + type: string exporterResources: description: Compute Resources required by this container. properties: @@ -1548,6 +1615,9 @@ spec: image: description: Memcached image type: string + imagePullPolicy: + description: Memcached image pull policy + type: string maxItemSize: description: 'Max item size (default: 1m, min: 1k, max: 1024m)' type: string @@ -1610,6 +1680,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. 
type: boolean diff --git a/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml index 4f45c7ecd..be827aa51 100644 --- a/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml +++ b/operators/multiclusterobservability/bundle/manifests/multicluster-observability-operator.clusterserviceversion.yaml @@ -416,6 +416,18 @@ spec: - get - list - watch + - apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch serviceAccountName: multicluster-observability-operator deployments: - name: multicluster-observability-operator @@ -455,7 +467,7 @@ spec: - name: SPOKE_NAMESPACE value: open-cluster-management-addon-observability image: quay.io/stolostron/multicluster-observability-operator:latest - imagePullPolicy: Always + imagePullPolicy: IfNotPresent lifecycle: preStop: exec: diff --git a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml index cb8768619..c00261e40 100644 --- a/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/bundle/manifests/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -57,7 +57,7 @@ spec: description: Enable or disable the downsample. The default value is false. This is not recommended as querying long time ranges without non-downsampled data is not efficient and useful. type: boolean imagePullPolicy: - default: Always + default: IfNotPresent description: Pull policy of the MultiClusterObservability images type: string imagePullSecret: @@ -125,11 +125,14 @@ spec: description: Object store config secret for metrics properties: key: - description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + tlsSecretName: + description: TLS secret contains the custom certificate for the object store + type: string required: - key - name @@ -293,6 +296,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object type: object grafana: description: The spec of grafana @@ -383,6 +391,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object type: object queryFrontend: description: spec for thanos-query-frontend @@ -514,6 +527,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object type: object retentionConfig: description: The spec of the data retention configurations @@ -569,6 +587,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object type: object store: description: spec for thanos-store-shard @@ -599,6 +622,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object type: object storeMemcached: description: Specifies the store memcached @@ -709,11 +737,14 @@ spec: description: Object store config secret for metrics properties: key: - description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/storage.md/#configuration for a valid content of key. + description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + tlsSecretName: + description: TLS secret contains the custom certificate for the object store + type: string required: - key - name @@ -734,6 +765,24 @@ spec: default: 10Gi description: The amount of storage applied to thanos store stateful sets, type: string + writeStorage: + description: WriteStorage storage config secret list for metrics + items: + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + tlsSecretName: + description: TLS secret contains the custom certificate for the object store + type: string + required: + - key + - name + type: object + type: array required: - metricObjectStorage type: object diff --git a/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml b/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml index fb6c031e0..568cda698 100644 --- a/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml +++ b/operators/multiclusterobservability/config/crd/bases/core.observatorium.io_observatoria.yaml @@ -376,9 +376,29 @@ spec: api: description: API properties: + additionalWriteEndpoints: + description: AdditionalWriteEndpoints is a slice of additional write endpoint for the Observatorium API. + properties: + endpointsConfigSecret: + description: Secret name for the endpoints configuration + type: string + mountPath: + description: Mount path for the secrets + type: string + mountSecrets: + description: Secret list to be mounted + items: + type: string + type: array + required: + - endpointsConfigSecret + type: object image: description: API image type: string + imagePullPolicy: + description: API image pull policy + type: string rbac: description: RBAC is an RBAC configuration for the Observatorium API. properties: @@ -585,6 +605,9 @@ spec: image: description: Loki image type: string + imagePullPolicy: + description: Loki image pull policy + type: string replicas: additionalProperties: format: int32 @@ -731,6 +754,12 @@ spec: name: description: Object Store Config Secret Name type: string + tlsSecretMountPath: + description: TLS secret mount path in thanos store/ruler/compact/receiver + type: string + tlsSecretName: + description: TLS secret contains the custom certificate for the object store + type: string required: - key - name @@ -872,6 +901,11 @@ spec: retentionResolutionRaw: description: RetentionResolutionRaw type: string + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -976,6 +1010,9 @@ spec: image: description: Thanos image type: string + imagePullPolicy: + description: Thanos image pull policy + type: string query: description: Query properties: @@ -1008,6 +1045,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -1025,6 +1067,9 @@ spec: exporterImage: description: Memcached Prometheus Exporter image type: string + exporterImagePullPolicy: + description: Memcached Prometheus Exporter image image pull policy + type: string exporterResources: description: Compute Resources required by this container. 
properties: @@ -1053,6 +1098,9 @@ spec: image: description: Memcached image type: string + imagePullPolicy: + description: Memcached image pull policy + type: string maxItemSize: description: 'Max item size (default: 1m, min: 1k, max: 1024m)' type: string @@ -1129,6 +1177,9 @@ spec: image: description: Receive Controller image type: string + imagePullPolicy: + description: Receive image pull policy + type: string resources: description: Compute Resources required by this container. properties: @@ -1194,6 +1245,11 @@ spec: retention: description: How long to retain raw samples on local storage type: string + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -1345,6 +1401,9 @@ spec: reloaderImage: description: ReloaderImage is an image of configmap reloader type: string + reloaderImagePullPolicy: + description: ReloaderImage image pull policy + type: string reloaderResources: description: Compute Resources required by this container. properties: @@ -1411,6 +1470,11 @@ spec: - name type: object type: array + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean @@ -1522,6 +1586,9 @@ spec: exporterImage: description: Memcached Prometheus Exporter image type: string + exporterImagePullPolicy: + description: Memcached Prometheus Exporter image image pull policy + type: string exporterResources: description: Compute Resources required by this container. properties: @@ -1550,6 +1617,9 @@ spec: image: description: Memcached image type: string + imagePullPolicy: + description: Memcached image pull policy + type: string maxItemSize: description: 'Max item size (default: 1m, min: 1k, max: 1024m)' type: string @@ -1612,6 +1682,11 @@ spec: description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map stored with a service account + type: object serviceMonitor: description: ServiceMonitor enables servicemonitor. type: boolean diff --git a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml index b51152836..e21b7f810 100644 --- a/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml +++ b/operators/multiclusterobservability/config/crd/bases/observability.open-cluster-management.io_multiclusterobservabilities.yaml @@ -54,7 +54,7 @@ spec: non-downsampled data is not efficient and useful. 
type: boolean imagePullPolicy: - default: Always + default: IfNotPresent description: Pull policy of the MultiClusterObservability images type: string imagePullSecret: @@ -132,12 +132,16 @@ spec: properties: key: description: The key of the secret to select from. Must be - a valid secret key. Refer to https://thanos.io/storage.md/#configuration + a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. type: string name: description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + tlsSecretName: + description: TLS secret contains the custom certificate for + the object store + type: string required: - key - name @@ -359,6 +363,12 @@ spec: More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map + stored with a service account + type: object type: object grafana: description: The spec of grafana @@ -464,6 +474,12 @@ spec: More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map + stored with a service account + type: object type: object queryFrontend: description: spec for thanos-query-frontend @@ -616,6 +632,12 @@ spec: More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map + stored with a service account + type: object type: object retentionConfig: description: The spec of the data retention configurations @@ -685,6 +707,12 @@ spec: More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map + stored with a service account + type: object type: object store: description: spec for thanos-store-shard @@ -720,6 +748,12 @@ spec: More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + description: Annotations is an unstructured key value map + stored with a service account + type: object type: object storeMemcached: description: Specifies the store memcached @@ -846,12 +880,16 @@ spec: properties: key: description: The key of the secret to select from. Must be - a valid secret key. Refer to https://thanos.io/storage.md/#configuration + a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage for a valid content of key. type: string name: description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' type: string + tlsSecretName: + description: TLS secret contains the custom certificate for + the object store + type: string required: - key - name @@ -877,6 +915,27 @@ spec: description: The amount of storage applied to thanos store stateful sets, type: string + writeStorage: + description: WriteStorage storage config secret list for metrics + items: + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. Refer to https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage + for a valid content of key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + tlsSecretName: + description: TLS secret contains the custom certificate + for the object store + type: string + required: + - key + - name + type: object + type: array required: - metricObjectStorage type: object diff --git a/operators/multiclusterobservability/config/manager/manager.yaml b/operators/multiclusterobservability/config/manager/manager.yaml index 17669a6cf..a29398a1c 100644 --- a/operators/multiclusterobservability/config/manager/manager.yaml +++ b/operators/multiclusterobservability/config/manager/manager.yaml @@ -31,7 +31,7 @@ spec: - -leader-elect # Replace this with the built image name image: quay.io/stolostron/multicluster-observability-operator:2.3.0-SNAPSHOT-2021-07-26-18-43-26 - imagePullPolicy: Always + imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false ports: diff --git a/operators/multiclusterobservability/config/rbac/mco_role.yaml b/operators/multiclusterobservability/config/rbac/mco_role.yaml index f22ce4bca..d8193d1aa 100644 --- a/operators/multiclusterobservability/config/rbac/mco_role.yaml +++ b/operators/multiclusterobservability/config/rbac/mco_role.yaml @@ -335,3 +335,15 @@ rules: - get - list - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go b/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go index cd8708f45..b241d580c 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/grafana.go @@ -72,6 +72,12 @@ func GenerateGrafanaDataSource( scheme *runtime.Scheme, mco *mcov1beta2.MultiClusterObservability) (*ctrl.Result, error) { + DynamicTimeInterval := mco.Spec.ObservabilityAddonSpec.Interval + + if DynamicTimeInterval > 30 && config.CollectRulesEnabled { + DynamicTimeInterval = 30 + } + grafanaDatasources, err := yaml.Marshal(GrafanaDatasources{ APIVersion: 1, Datasources: []*GrafanaDatasource{ @@ -80,12 +86,31 @@ func GenerateGrafanaDataSource( Type: "prometheus", Access: "proxy", IsDefault: true, - URL: fmt.Sprintf("http://%s.%s.svc.cluster.local:8080", config.ProxyServiceName, config.GetDefaultNamespace()), + URL: fmt.Sprintf( + "http://%s.%s.svc.cluster.local:8080", + config.ProxyServiceName, + config.GetDefaultNamespace(), + ), JSONData: &JsonData{ QueryTimeout: "300s", TimeInterval: fmt.Sprintf("%ds", mco.Spec.ObservabilityAddonSpec.Interval), }, }, + { + Name: "Observatorium-Dynamic", + Type: "prometheus", + Access: "proxy", + IsDefault: false, + 
URL: fmt.Sprintf( + "http://%s.%s.svc.cluster.local:8080", + config.ProxyServiceName, + config.GetDefaultNamespace(), + ), + JSONData: &JsonData{ + QueryTimeout: "300s", + TimeInterval: fmt.Sprintf("%ds", DynamicTimeInterval), + }, + }, }, }) if err != nil { diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go index 3b4c1de82..a332c1131 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller.go @@ -47,7 +47,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" "github.com/stolostron/multicluster-observability-operator/operators/pkg/deploying" commonutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" - mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1" observatoriumv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" ) @@ -95,6 +95,25 @@ func (r *MultiClusterObservabilityReconciler) Reconcile(ctx context.Context, req reqLogger := log.WithValues("Request.Namespace", req.Namespace, "Request.Name", req.Name) reqLogger.Info("Reconciling MultiClusterObservability") + if res, ok := config.BackupResourceMap[req.Name]; ok { + reqLogger.Info("Adding backup label") + var err error = nil + switch res { + case config.ResourceTypeConfigMap: + err = util.AddBackupLabelToConfigMap(r.Client, req.Name, config.GetDefaultNamespace()) + case config.ResourceTypeSecret: + err = util.AddBackupLabelToSecret(r.Client, req.Name, config.GetDefaultNamespace()) + default: + // we should never be here + log.Info("unknown type " + res) + } + + if err != nil { + reqLogger.Error(err, "Failed to add backup label") + return ctrl.Result{}, err + } + } + // Fetch the MultiClusterObservability instance instance := &mcov1beta2.MultiClusterObservability{} err := r.Client.Get(context.TODO(), types.NamespacedName{ @@ -151,6 +170,16 @@ func (r *MultiClusterObservabilityReconciler) Reconcile(ctx context.Context, req return ctrl.Result{}, nil } + if _, ok := config.BackupResourceMap[instance.Spec.StorageConfig.MetricObjectStorage.Key]; !ok { + log.Info("Adding backup label", "Secret", instance.Spec.StorageConfig.MetricObjectStorage.Key) + config.BackupResourceMap[instance.Spec.StorageConfig.MetricObjectStorage.Key] = config.ResourceTypeSecret + err = util.AddBackupLabelToSecret(r.Client, instance.Spec.StorageConfig.MetricObjectStorage.Key, config.GetDefaultNamespace()) + if err != nil { + log.Error(err, "Failed to add backup label", "Secret", instance.Spec.StorageConfig.MetricObjectStorage.Key) + return ctrl.Result{}, err + } + } + storageClassSelected, err := getStorageClass(instance, r.Client) if err != nil { return ctrl.Result{}, err @@ -181,15 +210,14 @@ func (r *MultiClusterObservabilityReconciler) Reconcile(ctx context.Context, req ns := &corev1.Namespace{} for _, res := range toDeploy { resNS := res.GetNamespace() - if resNS == config.GetDefaultNamespace() { - if err := controllerutil.SetControllerReference(instance, res, r.Scheme); err != nil { - reqLogger.Error(err, "Failed to set controller reference") - } + if err := 
controllerutil.SetControllerReference(instance, res, r.Scheme); err != nil { + reqLogger.Error(err, "Failed to set controller reference", "kind", res.GetKind(), "name", res.GetName()) } if resNS == "" { resNS = config.GetDefaultNamespace() } - if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: resNS}, ns); err != nil && apierrors.IsNotFound(err) { + if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: resNS}, ns); err != nil && + apierrors.IsNotFound(err) { ns = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ Name: resNS, }} @@ -271,11 +299,6 @@ func (r *MultiClusterObservabilityReconciler) initFinalization( mco *mcov1beta2.MultiClusterObservability) (bool, error) { if mco.GetDeletionTimestamp() != nil && commonutil.Contains(mco.GetFinalizers(), resFinalizer) { log.Info("To delete resources across namespaces") - svmCrdExists := r.CRDMap[config.StorageVersionMigrationCrdName] - if svmCrdExists { - // remove the StorageVersionMigration resource and ignore error - cleanObservabilityStorageVersionMigrationResource(r.Client, mco) // #nosec - } // clean up the cluster resources, eg. clusterrole, clusterrolebinding, etc if err := cleanUpClusterScopedResources(r.Client, mco); err != nil { log.Error(err, "Failed to remove cluster scoped resources") @@ -356,20 +379,44 @@ func (r *MultiClusterObservabilityReconciler) SetupWithManager(mgr ctrl.Manager) cmPred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { - if e.Object.GetName() == config.AlertRuleCustomConfigMapName && - e.Object.GetNamespace() == config.GetDefaultNamespace() { - config.SetCustomRuleConfigMap(true) - return true + if e.Object.GetNamespace() == config.GetDefaultNamespace() { + if e.Object.GetName() == config.AlertRuleCustomConfigMapName { + config.SetCustomRuleConfigMap(true) + return true + } else if _, ok := e.Object.GetLabels()[config.BackupLabelName]; ok { + // resource already has backup label + return false + } else if _, ok := config.BackupResourceMap[e.Object.GetName()]; ok { + // resource's backup label must be checked + return true + } else if _, ok := e.Object.GetLabels()[config.GrafanaCustomDashboardLabel]; ok { + // ConfigMap with custom-grafana-dashboard labels, check for backup label + config.BackupResourceMap[e.Object.GetName()] = config.ResourceTypeConfigMap + return true + } } return false }, UpdateFunc: func(e event.UpdateEvent) bool { // Find a way to restart the alertmanager to take the update - // if e.ObjectNew.GetName() == config.AlertRuleCustomConfigMapName && - // e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() { - // config.SetCustomRuleConfigMap(true) - // return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() - // } + if e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() { + if e.ObjectNew.GetName() == config.AlertRuleCustomConfigMapName { + // Grafana dynamically loads AlertRule configmap, nothing more to do + //config.SetCustomRuleConfigMap(true) + //return e.ObjectOld.GetResourceVersion() != e.ObjectNew.GetResourceVersion() + return false + } else if _, ok := e.ObjectNew.GetLabels()[config.BackupLabelName]; ok { + // resource already has backup label + return false + } else if _, ok := config.BackupResourceMap[e.ObjectNew.GetName()]; ok { + // resource's backup label must be checked + return true + } else if _, ok := e.ObjectNew.GetLabels()[config.GrafanaCustomDashboardLabel]; ok { + // ConfigMap with custom-grafana-dashboard labels, check for backup label + config.BackupResourceMap[e.ObjectNew.GetName()] = 
config.ResourceTypeConfigMap + return true + } + } return false }, DeleteFunc: func(e event.DeleteEvent) bool { @@ -384,18 +431,32 @@ func (r *MultiClusterObservabilityReconciler) SetupWithManager(mgr ctrl.Manager) secretPred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { - if e.Object.GetNamespace() == config.GetDefaultNamespace() && - (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || - e.Object.GetName() == config.AlertmanagerRouteBYOCERTName) { - return true + if e.Object.GetNamespace() == config.GetDefaultNamespace() { + if e.Object.GetName() == config.AlertmanagerRouteBYOCAName || + e.Object.GetName() == config.AlertmanagerRouteBYOCERTName { + return true + } else if _, ok := e.Object.GetLabels()[config.BackupLabelName]; ok { + // resource already has backup label + return false + } else if _, ok := config.BackupResourceMap[e.Object.GetName()]; ok { + // resource's backup label must be checked + return true + } } return false }, UpdateFunc: func(e event.UpdateEvent) bool { - if e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() && - (e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCAName || - e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCERTName) { - return true + if e.ObjectNew.GetNamespace() == config.GetDefaultNamespace() { + if e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCAName || + e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCERTName { + return true + } else if _, ok := e.ObjectNew.GetLabels()[config.BackupLabelName]; ok { + // resource already has backup label + return false + } else if _, ok := config.BackupResourceMap[e.ObjectNew.GetName()]; ok { + // resource's backup label must be checked + return true + } } return false }, @@ -427,19 +488,24 @@ func (r *MultiClusterObservabilityReconciler) SetupWithManager(mgr ctrl.Manager) Owns(&observatoriumv1alpha1.Observatorium{}). // Watch the configmap for thanos-ruler-custom-rules update Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(cmPred)). 
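Editor's note (illustration, not part of the patch): the ConfigMap predicate wired in above only enqueues objects in the default MCO namespace that are already tracked in config.BackupResourceMap or that carry the custom Grafana dashboard label, and Reconcile then stamps such objects with the backup label via util.AddBackupLabelToConfigMap / util.AddBackupLabelToSecret. A minimal sketch of a ConfigMap that would flow through this path, assuming illustrative label keys and namespace (the real values come from config.GrafanaCustomDashboardLabel, config.BackupLabelName and config.GetDefaultNamespace() and may differ):

apiVersion: v1
kind: ConfigMap
metadata:
  name: sample-custom-dashboard                      # hypothetical name
  namespace: open-cluster-management-observability   # assumed default MCO namespace
  labels:
    grafana-custom-dashboard: "true"                 # assumed key for config.GrafanaCustomDashboardLabel
    # After one reconcile pass the operator is expected to add the backup label, e.g.:
    # cluster.open-cluster-management.io/backup: ""  # assumed key for config.BackupLabelName
data:
  sample-dashboard.json: |
    {}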
+ // Watch the secret for deleting event of alertmanager-config Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(secretPred)) - mchGroupKind := schema.GroupKind{Group: mchv1.SchemeGroupVersion.Group, Kind: "MultiClusterHub"} - if _, err := r.RESTMapper.RESTMapping(mchGroupKind, mchv1.SchemeGroupVersion.Version); err == nil { + mchGroupKind := schema.GroupKind{Group: mchv1.GroupVersion.Group, Kind: "MultiClusterHub"} + if _, err := r.RESTMapper.RESTMapping(mchGroupKind, mchv1.GroupVersion.Version); err == nil { mchPred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { // this is for operator restart, the mch CREATE event will be caught and the mch should be ready if e.Object.GetNamespace() == config.GetMCONamespace() && e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && e.Object.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion { - // only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully - ok, err := config.ReadImageManifestConfigMap(c, e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion) + // only read the image manifests configmap and enqueue the request when the MCH is + // installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap( + c, + e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion, + ) if err != nil { return false } @@ -451,8 +517,12 @@ func (r *MultiClusterObservabilityReconciler) SetupWithManager(mgr ctrl.Manager) if e.ObjectNew.GetNamespace() == config.GetMCONamespace() && e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && e.ObjectNew.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion { - // only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully - ok, err := config.ReadImageManifestConfigMap(c, e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion) + // only read the image manifests configmap and enqueue the request when the MCH is + // installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap( + c, + e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion, + ) if err != nil { return false } @@ -468,14 +538,18 @@ func (r *MultiClusterObservabilityReconciler) SetupWithManager(mgr ctrl.Manager) mchCrdExists, _ := r.CRDMap[config.MCHCrdName] if mchCrdExists { // secondary watch for MCH - ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &mchv1.MultiClusterHub{}}, handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { - return []reconcile.Request{ - {NamespacedName: types.NamespacedName{ - Name: config.MCHUpdatedRequestName, - Namespace: a.GetNamespace(), - }}, - } - }), builder.WithPredicates(mchPred)) + ctrBuilder = ctrBuilder.Watches( + &source.Kind{Type: &mchv1.MultiClusterHub{}}, + handler.EnqueueRequestsFromMapFunc(func(a client.Object) []reconcile.Request { + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: config.MCHUpdatedRequestName, + Namespace: a.GetNamespace(), + }}, + } + }), + builder.WithPredicates(mchPred), + ) } } @@ -637,8 +711,16 @@ func GenerateAlertmanagerRoute( amRouteBYOCaSrt := &corev1.Secret{} amRouteBYOCertSrt := &corev1.Secret{} - err1 := runclient.Get(context.TODO(), types.NamespacedName{Name: config.AlertmanagerRouteBYOCAName, Namespace: config.GetDefaultNamespace()}, amRouteBYOCaSrt) - err2 := 
runclient.Get(context.TODO(), types.NamespacedName{Name: config.AlertmanagerRouteBYOCERTName, Namespace: config.GetDefaultNamespace()}, amRouteBYOCertSrt) + err1 := runclient.Get( + context.TODO(), + types.NamespacedName{Name: config.AlertmanagerRouteBYOCAName, Namespace: config.GetDefaultNamespace()}, + amRouteBYOCaSrt, + ) + err2 := runclient.Get( + context.TODO(), + types.NamespacedName{Name: config.AlertmanagerRouteBYOCERTName, Namespace: config.GetDefaultNamespace()}, + amRouteBYOCertSrt, + ) if err1 == nil && err2 == nil { log.Info("BYO CA/Certificate found for the Route of Alertmanager, will using BYO CA/certificate for the Route of Alertmanager") @@ -667,9 +749,19 @@ func GenerateAlertmanagerRoute( } found := &routev1.Route{} - err := runclient.Get(context.TODO(), types.NamespacedName{Name: amGateway.Name, Namespace: amGateway.Namespace}, found) + err := runclient.Get( + context.TODO(), + types.NamespacedName{Name: amGateway.Name, Namespace: amGateway.Namespace}, + found, + ) if err != nil && errors.IsNotFound(err) { - log.Info("Creating a new route to expose alertmanager", "amGateway.Namespace", amGateway.Namespace, "amGateway.Name", amGateway.Name) + log.Info( + "Creating a new route to expose alertmanager", + "amGateway.Namespace", + amGateway.Namespace, + "amGateway.Name", + amGateway.Name, + ) err = runclient.Create(context.TODO(), amGateway) if err != nil { return &ctrl.Result{}, err @@ -713,8 +805,16 @@ func GenerateProxyRoute( proxyRouteBYOCaSrt := &corev1.Secret{} proxyRouteBYOCertSrt := &corev1.Secret{} - err1 := runclient.Get(context.TODO(), types.NamespacedName{Name: config.ProxyRouteBYOCAName, Namespace: config.GetDefaultNamespace()}, proxyRouteBYOCaSrt) - err2 := runclient.Get(context.TODO(), types.NamespacedName{Name: config.ProxyRouteBYOCERTName, Namespace: config.GetDefaultNamespace()}, proxyRouteBYOCertSrt) + err1 := runclient.Get( + context.TODO(), + types.NamespacedName{Name: config.ProxyRouteBYOCAName, Namespace: config.GetDefaultNamespace()}, + proxyRouteBYOCaSrt, + ) + err2 := runclient.Get( + context.TODO(), + types.NamespacedName{Name: config.ProxyRouteBYOCERTName, Namespace: config.GetDefaultNamespace()}, + proxyRouteBYOCertSrt, + ) if err1 == nil && err2 == nil { log.Info("BYO CA/Certificate found for the Route of Proxy, will using BYO CA/certificate for the Route of Proxy") @@ -743,9 +843,19 @@ func GenerateProxyRoute( } found := &routev1.Route{} - err := runclient.Get(context.TODO(), types.NamespacedName{Name: proxyGateway.Name, Namespace: proxyGateway.Namespace}, found) + err := runclient.Get( + context.TODO(), + types.NamespacedName{Name: proxyGateway.Name, Namespace: proxyGateway.Namespace}, + found, + ) if err != nil && errors.IsNotFound(err) { - log.Info("Creating a new route to expose rbac proxy", "proxyGateway.Namespace", proxyGateway.Namespace, "proxyGateway.Name", proxyGateway.Name) + log.Info( + "Creating a new route to expose rbac proxy", + "proxyGateway.Namespace", + proxyGateway.Namespace, + "proxyGateway.Name", + proxyGateway.Name, + ) err = runclient.Create(context.TODO(), proxyGateway) if err != nil { return &ctrl.Result{}, err diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go index 578721ff2..abe244569 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go +++ 
b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_controller_test.go @@ -27,9 +27,10 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log/zap" migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" - mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1" mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" @@ -40,6 +41,7 @@ import ( ) func init() { + ctrl.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(os.Stdout))) os.Setenv("UNIT_TEST", "true") } @@ -316,13 +318,16 @@ func TestMultiClusterMonitoringCRUpdate(t *testing.T) { clientCACerts := newTestCert(config.ClientCACerts, namespace) grafanaCert := newTestCert(config.GrafanaCerts, namespace) serverCert := newTestCert(config.ServerCerts, namespace) + //byo case for proxy + proxyRouteBYOCACerts := newTestCert(config.ProxyRouteBYOCAName, namespace) + proxyRouteBYOCert := newTestCert(config.ProxyRouteBYOCERTName, namespace) // byo case for the alertmanager route testAmRouteBYOCaSecret := newTestCert(config.AlertmanagerRouteBYOCAName, namespace) testAmRouteBYOCertSecret := newTestCert(config.AlertmanagerRouteBYOCERTName, namespace) clustermgmtAddon := newClusterManagementAddon() - objs := []runtime.Object{mco, svc, serverCACerts, clientCACerts, grafanaCert, serverCert, - testAmRouteBYOCaSecret, testAmRouteBYOCertSecret, clustermgmtAddon} + objs := []runtime.Object{mco, svc, serverCACerts, clientCACerts, proxyRouteBYOCACerts, grafanaCert, serverCert, + testAmRouteBYOCaSecret, testAmRouteBYOCertSecret, proxyRouteBYOCert, clustermgmtAddon} // Create a fake client to mock API calls. cl := fake.NewFakeClient(objs...) 
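Editor's note (illustration, not part of the patch): the two new test fixtures above (proxyRouteBYOCACerts and proxyRouteBYOCert) exercise the bring-your-own-certificate branch of GenerateProxyRoute, which looks up the secrets named by config.ProxyRouteBYOCAName and config.ProxyRouteBYOCERTName in the default namespace and, when both are present, uses the supplied CA and serving certificate for the proxy route (per the log message in the hunk above). A minimal sketch of what such secrets could look like, assuming illustrative secret names, data keys and namespace (the authoritative names live in the config package and may differ):

apiVersion: v1
kind: Secret
metadata:
  name: proxy-byo-ca                                 # assumed value of config.ProxyRouteBYOCAName
  namespace: open-cluster-management-observability   # assumed default MCO namespace
type: Opaque
data:
  ca.crt: <base64-encoded CA certificate>            # key name is an assumption
---
apiVersion: v1
kind: Secret
metadata:
  name: proxy-byo-cert                               # assumed value of config.ProxyRouteBYOCERTName
  namespace: open-cluster-management-observability
type: Opaque
data:
  tls.crt: <base64-encoded serving certificate>      # key names are assumptions
  tls.key: <base64-encoded private key>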
@@ -375,13 +380,67 @@ func TestMultiClusterMonitoringCRUpdate(t *testing.T) { if err != nil { t.Fatalf("Failed to create secret: (%v)", err) } - _, err = r.Reconcile(context.TODO(), req) + + // backup label test for Secret + req2 := ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + + _, err = r.Reconcile(context.TODO(), req2) if err != nil { t.Fatalf("reconcile: (%v)", err) } //wait for update status time.Sleep(1 * time.Second) + updatedObjectStoreSecret := &corev1.Secret{} + err = r.Client.Get(context.TODO(), req2.NamespacedName, updatedObjectStoreSecret) + if err != nil { + t.Fatalf("backup Failed to get ObjectStore secret (%v)", err) + } + + if _, ok := updatedObjectStoreSecret.Labels[config.BackupLabelName]; !ok { + t.Fatalf("Missing backup label on: (%v)", updatedObjectStoreSecret) + } + + // backup label test for Configmap + err = cl.Create(context.TODO(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.AlertRuleCustomConfigMapName, + Namespace: namespace, + }, + }) + if err != nil { + t.Fatalf("Failed to create configmap: (%v)", err) + } + + req2 = ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: config.AlertRuleCustomConfigMapName, + Namespace: namespace, + }, + } + + _, err = r.Reconcile(context.TODO(), req2) + if err != nil { + t.Fatalf("reconcile: (%v)", err) + } + //wait for update status + time.Sleep(1 * time.Second) + + updatedConfigmap := &corev1.ConfigMap{} + err = r.Client.Get(context.TODO(), req2.NamespacedName, updatedConfigmap) + if err != nil { + t.Fatalf("backup Failed to get configmap (%v)", err) + } + + if _, ok := updatedConfigmap.Labels[config.BackupLabelName]; !ok { + t.Fatalf("Missing backup label on: (%v)", updatedConfigmap) + } + updatedMCO = &mcov1beta2.MultiClusterObservability{} err = r.Client.Get(context.TODO(), req.NamespacedName, updatedMCO) if err != nil { diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go index 766e20a28..6deaaa543 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/multiclusterobservability_status.go @@ -100,8 +100,6 @@ func updateStatus(c client.Client) { return } } - - return } // fillup the status if there is no status and lastTransitionTime in upgrade case @@ -348,8 +346,8 @@ func checkObjStorageStatus( func checkAddonSpecStatus(mco *mcov1beta2.MultiClusterObservability) *mcoshared.Condition { addonSpec := mco.Spec.ObservabilityAddonSpec - if addonSpec != nil && addonSpec.EnableMetrics == false { - log.Info("Disable metrics collocter") + if addonSpec != nil && !addonSpec.EnableMetrics { + log.Info("Disable metrics collector") return newMetricsDisabledCondition() } return nil diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go index 0687df844..eca466307 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium.go @@ -6,15 +6,19 @@ package multiclusterobservability import ( "bytes" "context" + "errors" "fmt" "os" + "path" + "reflect" 
"time" routev1 "github.com/openshift/api/route/v1" obsv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" + "gopkg.in/yaml.v2" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -23,15 +27,19 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/yaml" + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + mcoutil "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" ) const ( + endpointsConfigName = "observability-remotewrite-endpoints" + endpointsKey = "endpoints.yaml" + obsAPIGateway = "observatorium-api" readOnlyRoleName = "read-only-metrics" @@ -52,15 +60,25 @@ func GenerateObservatoriumCR( return &ctrl.Result{}, err } + // fetch TLS secret mount path from the object store secret + tlsSecretMountPath, err := getTLSSecretMountPath(cl, mco.Spec.StorageConfig.MetricObjectStorage) + if err != nil { + return &ctrl.Result{}, err + } + log.Info("storageClassSelected", "storageClassSelected", storageClassSelected) + obsSpec, err := newDefaultObservatoriumSpec(cl, mco, storageClassSelected, tlsSecretMountPath) + if err != nil { + return &ctrl.Result{}, err + } observatoriumCR := &obsv1alpha1.Observatorium{ ObjectMeta: metav1.ObjectMeta{ Name: mcoconfig.GetOperandName(mcoconfig.Observatorium), Namespace: mcoconfig.GetDefaultNamespace(), Labels: labels, }, - Spec: *newDefaultObservatoriumSpec(mco, storageClassSelected), + Spec: *obsSpec, } // Set MultiClusterObservability instance as the owner and controller @@ -79,7 +97,7 @@ func GenerateObservatoriumCR( observatoriumCRFound, ) - if err != nil && errors.IsNotFound(err) { + if err != nil && k8serrors.IsNotFound(err) { log.Info("Creating a new observatorium CR", "observatorium", observatoriumCR.Name, ) @@ -115,7 +133,7 @@ func GenerateObservatoriumCR( newObj.Spec = newSpec err = cl.Update(context.TODO(), newObj) if err != nil { - log.Error(err, "Failed to update observatorium CR %s", observatoriumCR.Name) + log.Error(err, "Failed to update observatorium CR %s", "name", observatoriumCR.Name) // add timeout for update failure avoid update conflict return &ctrl.Result{RequeueAfter: time.Second * 3}, err } @@ -130,6 +148,40 @@ func GenerateObservatoriumCR( return nil, nil } +func getTLSSecretMountPath(client client.Client, + objectStorage *oashared.PreConfiguredStorage) (string, error) { + found := &v1.Secret{} + err := client.Get( + context.TODO(), + types.NamespacedName{Name: objectStorage.Name, Namespace: mcoconfig.GetDefaultNamespace()}, + found, + ) + if err != nil { + // report the status if the object store is not defined in checkObjStorageStatus method + // here just ignore + if k8serrors.IsNotFound(err) { + return "", nil + } + return "", err + } + data, ok := 
found.Data[objectStorage.Key] + if !ok { + return "", errors.New("failed to found the object storage configuration key from secret") + } + + var objectConfg mcoconfig.ObjectStorgeConf + err = yaml.Unmarshal(data, &objectConfg) + if err != nil { + return "", err + } + + caFile := objectConfg.Config.HTTPConfig.TLSConfig.CAFile + if caFile == "" { + return "", nil + } + return path.Dir(caFile), nil +} + func updateTenantID( newSpec *obsv1alpha1.ObservatoriumSpec, newTenant obsv1alpha1.APITenant, @@ -183,7 +235,7 @@ func GenerateAPIGatewayRoute( context.TODO(), types.NamespacedName{Name: apiGateway.Name, Namespace: apiGateway.Namespace}, &routev1.Route{}) - if err != nil && errors.IsNotFound(err) { + if err != nil && k8serrors.IsNotFound(err) { log.Info("Creating a new route to expose observatorium api", "apiGateway.Namespace", apiGateway.Namespace, "apiGateway.Name", apiGateway.Name, @@ -197,15 +249,19 @@ func GenerateAPIGatewayRoute( return nil, nil } -func newDefaultObservatoriumSpec(mco *mcov1beta2.MultiClusterObservability, - scSelected string) *obsv1alpha1.ObservatoriumSpec { +func newDefaultObservatoriumSpec(cl client.Client, mco *mcov1beta2.MultiClusterObservability, + scSelected string, tlsSecretMountPath string) (*obsv1alpha1.ObservatoriumSpec, error) { obs := &obsv1alpha1.ObservatoriumSpec{} obs.SecurityContext = &v1.SecurityContext{} obs.PullSecret = mcoconfig.GetImagePullSecret(mco.Spec) obs.NodeSelector = mco.Spec.NodeSelector obs.Tolerations = mco.Spec.Tolerations - obs.API = newAPISpec(mco) + obsApi, err := newAPISpec(cl, mco) + if err != nil { + return obs, err + } + obs.API = obsApi obs.Thanos = newThanosSpec(mco, scSelected) if util.ProxyEnvVarsAreSet() { obs.EnvVars = newEnvVars() @@ -220,8 +276,10 @@ func newDefaultObservatoriumSpec(mco *mcov1beta2.MultiClusterObservability, objStorageConf := mco.Spec.StorageConfig.MetricObjectStorage obs.ObjectStorageConfig.Thanos.Name = objStorageConf.Name obs.ObjectStorageConfig.Thanos.Key = objStorageConf.Key + obs.ObjectStorageConfig.Thanos.TLSSecretName = objStorageConf.TLSSecretName + obs.ObjectStorageConfig.Thanos.TLSSecretMountPath = tlsSecretMountPath } - return obs + return obs, nil } // return proxy variables @@ -314,7 +372,50 @@ func newAPITLS() obsv1alpha1.TLS { } } -func newAPISpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.APISpec { +func applyEndpointsSecret(c client.Client, eps []mcoutil.RemoteWriteEndpoint) error { + epsYaml, err := yaml.Marshal(eps) + if err != nil { + return err + } + epsYamlMap := map[string][]byte{} + epsYamlMap[endpointsKey] = epsYaml + epsSecret := &v1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: endpointsConfigName, + Namespace: config.GetDefaultNamespace(), + }, + Data: epsYamlMap, + } + found := &v1.Secret{} + err = c.Get(context.TODO(), types.NamespacedName{Name: endpointsConfigName, + Namespace: config.GetDefaultNamespace()}, found) + if err != nil { + if k8serrors.IsNotFound(err) { + err = c.Create(context.TODO(), epsSecret) + if err != nil { + return err + } + } else { + return err + } + } else { + if !reflect.DeepEqual(epsYamlMap, found.Data) { + epsSecret.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion + err = c.Update(context.TODO(), epsSecret) + if err != nil { + return err + } + } + } + return nil + +} + +func newAPISpec(c client.Client, mco *mcov1beta2.MultiClusterObservability) (obsv1alpha1.APISpec, error) { apiSpec := obsv1alpha1.APISpec{} apiSpec.RBAC = 
newAPIRBAC() apiSpec.Tenants = newAPITenants() @@ -330,8 +431,58 @@ func newAPISpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.APISpec { if replace { apiSpec.Image = image } + apiSpec.ImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) apiSpec.ServiceMonitor = true - return apiSpec + if mco.Spec.StorageConfig.WriteStorage != nil { + eps := []mcoutil.RemoteWriteEndpoint{} + mountSecrets := []string{} + for _, storageConfig := range mco.Spec.StorageConfig.WriteStorage { + storageSecret := &v1.Secret{} + err := c.Get(context.TODO(), types.NamespacedName{Name: storageConfig.Name, + Namespace: mcoconfig.GetDefaultNamespace()}, storageSecret) + if err != nil { + log.Error(err, "Failed to get the secret", "name", storageConfig.Name) + return apiSpec, err + } else { + data, ok := storageSecret.Data[storageConfig.Key] + if !ok { + log.Error(err, "Invalid key in secret", "name", storageConfig.Name, "key", storageConfig.Key) + return apiSpec, errors.New(fmt.Sprintf("Invalid key %s in secret %s", storageConfig.Key, storageConfig.Name)) + } + ep := &mcoutil.RemoteWriteEndpointWithSecret{} + err = yaml.Unmarshal(data, ep) + if err != nil { + log.Error(err, "Failed to unmarshal data in secret", "name", storageConfig.Name) + return apiSpec, err + } + newEp := &mcoutil.RemoteWriteEndpoint{ + Name: storageConfig.Name, + URL: ep.URL, + } + if ep.HttpClientConfig != nil { + newConfig, mountS := mcoutil.Transform(*ep.HttpClientConfig) + mountSecrets = append(mountSecrets, mountS...) + newEp.HttpClientConfig = newConfig + } + eps = append(eps, *newEp) + } + } + + err := applyEndpointsSecret(c, eps) + if err != nil { + return apiSpec, err + } + if len(eps) > 0 { + apiSpec.AdditionalWriteEndpoints = &obsv1alpha1.EndpointsConfig{ + EndpointsConfigSecret: endpointsConfigName, + } + if len(mountSecrets) > 0 { + apiSpec.AdditionalWriteEndpoints.MountSecrets = mountSecrets + apiSpec.AdditionalWriteEndpoints.MountPath = mcoutil.MountPath + } + } + } + return apiSpec, nil } func newReceiversSpec( @@ -360,6 +511,10 @@ func newReceiversSpec( mco.Spec.StorageConfig.ReceiveStorageSize, scSelected) + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.Receive != nil && + mco.Spec.AdvancedConfig.Receive.ServiceAccountAnnotations != nil { + receSpec.ServiceAccountAnnotations = mco.Spec.AdvancedConfig.Receive.ServiceAccountAnnotations + } return receSpec } @@ -405,6 +560,7 @@ func newRuleSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) o if found { ruleSpec.ReloaderImage = reloaderImage } + ruleSpec.ReloaderImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) ruleSpec.VolumeClaimTemplate = newVolumeClaimTemplate( mco.Spec.StorageConfig.RuleStorageSize, @@ -450,6 +606,11 @@ func newRuleSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) o } } + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.Rule != nil && + mco.Spec.AdvancedConfig.Rule.ServiceAccountAnnotations != nil { + ruleSpec.ServiceAccountAnnotations = mco.Spec.AdvancedConfig.Rule.ServiceAccountAnnotations + } + return ruleSpec } @@ -467,6 +628,11 @@ func newStoreSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) storeSpec.ServiceMonitor = true storeSpec.Cache = newMemCacheSpec(mcoconfig.ThanosStoreMemcached, mco) + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.Store != nil && + mco.Spec.AdvancedConfig.Store.ServiceAccountAnnotations != nil { + storeSpec.ServiceAccountAnnotations = mco.Spec.AdvancedConfig.Store.ServiceAccountAnnotations + } + return storeSpec } @@ 
-498,11 +664,13 @@ func newMemCacheSpec(component string, mco *mcov1beta2.MultiClusterObservability if found { memCacheSpec.Image = image } + memCacheSpec.ImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) found, image = mcoconfig.ReplaceImage(mco.Annotations, memCacheSpec.ExporterImage, mcoconfig.MemcachedExporterKey) if found { memCacheSpec.ExporterImage = image } + memCacheSpec.ExporterImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) if cacheConfig != nil && cacheConfig.MemoryLimitMB != nil { memCacheSpec.MemoryLimitMB = cacheConfig.MemoryLimitMB } else { @@ -539,6 +707,7 @@ func newThanosSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string) if replace { thanosSpec.Image = image } + thanosSpec.ImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) return thanosSpec } @@ -565,6 +734,10 @@ func newQuerySpec(mco *mcov1beta2.MultiClusterObservability) obsv1alpha1.QuerySp if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { querySpec.Resources = mcoconfig.GetResources(config.ThanosQuery, mco.Spec.AdvancedConfig) } + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.Query != nil && + mco.Spec.AdvancedConfig.Query.ServiceAccountAnnotations != nil { + querySpec.ServiceAccountAnnotations = mco.Spec.AdvancedConfig.Query.ServiceAccountAnnotations + } return querySpec } @@ -578,8 +751,12 @@ func newReceiverControllerSpec(mco *mcov1beta2.MultiClusterObservability) obsv1a if !mcoconfig.WithoutResourcesRequests(mco.GetAnnotations()) { receiveControllerSpec.Resources = v1.ResourceRequirements{ Requests: v1.ResourceList{ - v1.ResourceName(v1.ResourceCPU): resource.MustParse(mcoconfig.ObservatoriumReceiveControllerCPURequets), - v1.ResourceName(v1.ResourceMemory): resource.MustParse(mcoconfig.ObservatoriumReceiveControllerMemoryRequets), + v1.ResourceName(v1.ResourceCPU): resource.MustParse( + mcoconfig.ObservatoriumReceiveControllerCPURequets, + ), + v1.ResourceName(v1.ResourceMemory): resource.MustParse( + mcoconfig.ObservatoriumReceiveControllerMemoryRequets, + ), }, } } @@ -588,6 +765,7 @@ func newReceiverControllerSpec(mco *mcov1beta2.MultiClusterObservability) obsv1a if replace { receiveControllerSpec.Image = image } + receiveControllerSpec.ImagePullPolicy = mcoconfig.GetImagePullPolicy(mco.Spec) return receiveControllerSpec } @@ -629,6 +807,11 @@ func newCompactSpec(mco *mcov1beta2.MultiClusterObservability, scSelected string compactSpec.RetentionResolution1h = mcoconfig.RetentionResolution1h } + if mco.Spec.AdvancedConfig != nil && mco.Spec.AdvancedConfig.Compact != nil && + mco.Spec.AdvancedConfig.Compact.ServiceAccountAnnotations != nil { + compactSpec.ServiceAccountAnnotations = mco.Spec.AdvancedConfig.Compact.ServiceAccountAnnotations + } + compactSpec.VolumeClaimTemplate = newVolumeClaimTemplate( mco.Spec.StorageConfig.CompactStorageSize, scSelected) @@ -668,9 +851,13 @@ func deleteStoreSts(cl client.Client, name string, oldNum int32, newNum int32) e for i := newNum; i < oldNum; i++ { stsName := fmt.Sprintf("%s-thanos-store-shard-%d", name, i) found := &appsv1.StatefulSet{} - err := cl.Get(context.TODO(), types.NamespacedName{Name: stsName, Namespace: mcoconfig.GetDefaultNamespace()}, found) + err := cl.Get( + context.TODO(), + types.NamespacedName{Name: stsName, Namespace: mcoconfig.GetDefaultNamespace()}, + found, + ) if err != nil { - if !errors.IsNotFound(err) { + if !k8serrors.IsNotFound(err) { log.Error(err, "Failed to get statefulset", "name", stsName) return err } diff --git 
a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go index 92f879642..6ce7498a1 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/observatorium_test.go @@ -8,6 +8,8 @@ import ( "context" "testing" + "gopkg.in/yaml.v2" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -15,11 +17,13 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/yaml" mcoshared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" + oashared "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/shared" mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" mcoconfig "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" + mcoutil "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" observatoriumv1alpha1 "github.com/stolostron/observatorium-operator/api/v1alpha1" ) @@ -49,8 +53,15 @@ func TestNewDefaultObservatoriumSpec(t *testing.T) { Spec: mcov1beta2.MultiClusterObservabilitySpec{ StorageConfig: &mcov1beta2.StorageConfig{ MetricObjectStorage: &mcoshared.PreConfiguredStorage{ - Key: "key", - Name: "name", + Key: "key", + Name: "name", + TLSSecretName: "secret", + }, + WriteStorage: []*mcoshared.PreConfiguredStorage{ + { + Key: "write_key", + Name: "write_name", + }, }, StorageClass: storageClassName, AlertmanagerStorageSize: "1Gi", @@ -66,13 +77,29 @@ func TestNewDefaultObservatoriumSpec(t *testing.T) { }, } - obs := newDefaultObservatoriumSpec(mco, storageClassName) + writeStorageS := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "write_name", + Namespace: config.GetDefaultNamespace(), + }, + Type: "Opaque", + Data: map[string][]byte{ + "write_key": []byte(`url: http://remotewrite/endpoint +`), + }, + } + + objs := []runtime.Object{mco, writeStorageS} + // Create a fake client to mock API calls. + cl := fake.NewFakeClient(objs...) 
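As a point of reference, here is a minimal sketch of the mount-path derivation performed by getTLSSecretMountPath above: the thanos.yaml payload from the object-store secret is unmarshalled and the directory of the configured ca_file becomes the TLS mount path. The struct names below are simplified stand-ins for the operator's own config types (such as mcoconfig.ObjectStorgeConf), not the actual definitions.

```go
package main

import (
	"fmt"
	"path"

	"gopkg.in/yaml.v2"
)

// Simplified stand-ins for the operator's object-store configuration types.
type tlsConf struct {
	CAFile string `yaml:"ca_file"`
}
type httpConf struct {
	TLSConfig tlsConf `yaml:"tls_config"`
}
type storeConf struct {
	HTTPConfig httpConf `yaml:"http_config"`
}
type objectStoreConf struct {
	Type   string    `yaml:"type"`
	Config storeConf `yaml:"config"`
}

// tlsMountPath returns the directory that holds the CA file, or "" when the
// secret payload carries no TLS configuration.
func tlsMountPath(thanosYAML []byte) (string, error) {
	conf := objectStoreConf{}
	if err := yaml.Unmarshal(thanosYAML, &conf); err != nil {
		return "", err
	}
	caFile := conf.Config.HTTPConfig.TLSConfig.CAFile
	if caFile == "" {
		return "", nil
	}
	return path.Dir(caFile), nil
}

func main() {
	payload := []byte(`type: s3
config:
  bucket: s3
  endpoint: s3.amazonaws.com
  http_config:
    tls_config:
      ca_file: /etc/minio/certs/ca.crt
`)
	p, err := tlsMountPath(payload)
	fmt.Println(p, err) // /etc/minio/certs <nil>
}
```

This mirrors why a ca_file of "/ca.crt" resolves to the mount path "/" in the test cases above: path.Dir simply strips the final path element.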
+ + obs, _ := newDefaultObservatoriumSpec(cl, mco, storageClassName, "") receiversStorage := obs.Thanos.Receivers.VolumeClaimTemplate.Spec.Resources.Requests["storage"] ruleStorage := obs.Thanos.Rule.VolumeClaimTemplate.Spec.Resources.Requests["storage"] storeStorage := obs.Thanos.Store.VolumeClaimTemplate.Spec.Resources.Requests["storage"] compactStorage := obs.Thanos.Compact.VolumeClaimTemplate.Spec.Resources.Requests["storage"] - obs = newDefaultObservatoriumSpec(mco, storageClassName) + obs, _ = newDefaultObservatoriumSpec(cl, mco, storageClassName, "") if *obs.Thanos.Receivers.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || *obs.Thanos.Rule.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || *obs.Thanos.Store.VolumeClaimTemplate.Spec.StorageClassName != storageClassName || @@ -83,9 +110,28 @@ func TestNewDefaultObservatoriumSpec(t *testing.T) { compactStorage.String() != statefulSetSize || obs.ObjectStorageConfig.Thanos.Key != "key" || obs.ObjectStorageConfig.Thanos.Name != "name" || - obs.Thanos.Query.LookbackDelta != "600s" { + obs.ObjectStorageConfig.Thanos.TLSSecretName != "secret" || + obs.Thanos.Query.LookbackDelta != "600s" || + obs.API.AdditionalWriteEndpoints.EndpointsConfigSecret != endpointsConfigName { t.Errorf("Failed to newDefaultObservatorium") } + + endpointS := &corev1.Secret{} + err := cl.Get(context.TODO(), types.NamespacedName{ + Name: endpointsConfigName, + Namespace: config.GetDefaultNamespace(), + }, endpointS) + if err != nil { + t.Errorf("Failed to get endpoint config secret due to %v", err) + } + endpointConfig := []mcoutil.RemoteWriteEndpoint{} + err = yaml.Unmarshal(endpointS.Data[endpointsKey], &endpointConfig) + if err != nil { + t.Errorf("Failed to unmarshal endpoint secret due to %v", err) + } + if endpointConfig[0].Name != "write_name" || endpointConfig[0].URL.String() != "http://remotewrite/endpoint" { + t.Errorf("Wrong endpoint config: %s, %s", endpointConfig[0].Name, endpointConfig[0].URL.String()) + } } func TestMergeVolumeClaimTemplate(t *testing.T) { @@ -157,7 +203,7 @@ func TestNoUpdateObservatoriumCR(t *testing.T) { ) oldSpec := observatoriumCRFound.Spec - newSpec := newDefaultObservatoriumSpec(mco, storageClassName) + newSpec, _ := newDefaultObservatoriumSpec(cl, mco, storageClassName, "") oldSpecBytes, _ := yaml.Marshal(oldSpec) newSpecBytes, _ := yaml.Marshal(newSpec) @@ -170,3 +216,107 @@ func TestNoUpdateObservatoriumCR(t *testing.T) { t.Errorf("Failed to update observatorium due to %v", err) } } + +func TestGetTLSSecretMountPath(t *testing.T) { + + testCaseList := []struct { + name string + secret *corev1.Secret + storeConfig *oashared.PreConfiguredStorage + expected string + }{ + + { + "no tls secret defined", + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: config.GetDefaultNamespace(), + }, + Type: "Opaque", + Data: map[string][]byte{ + "thanos.yaml": []byte(`type: s3 +config: + bucket: s3 + endpoint: s3.amazonaws.com +`), + }, + }, + &oashared.PreConfiguredStorage{ + Key: "thanos.yaml", + Name: "test", + }, + "", + }, + { + "has tls config defined", + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-1", + Namespace: config.GetDefaultNamespace(), + }, + Type: "Opaque", + Data: map[string][]byte{ + "thanos.yaml": []byte(`type: s3 +config: + bucket: s3 + endpoint: s3.amazonaws.com + insecure: true + http_config: + tls_config: + ca_file: /etc/minio/certs/ca.crt + cert_file: /etc/minio/certs/public.crt + key_file: /etc/minio/certs/private.key + insecure_skip_verify: 
true +`), + }, + }, + &oashared.PreConfiguredStorage{ + Key: "thanos.yaml", + Name: "test-1", + }, + "/etc/minio/certs", + }, + { + "has tls config defined in root path", + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-2", + Namespace: config.GetDefaultNamespace(), + }, + Type: "Opaque", + Data: map[string][]byte{ + "thanos.yaml": []byte(`type: s3 +config: + bucket: s3 + endpoint: s3.amazonaws.com + insecure: true + http_config: + tls_config: + ca_file: /ca.crt + cert_file: /etc/minio/certs/public.crt + key_file: /etc/minio/certs/private.key + insecure_skip_verify: true +`), + }, + }, + &oashared.PreConfiguredStorage{ + Key: "thanos.yaml", + Name: "test-2", + }, + "/", + }, + } + + client := fake.NewFakeClient([]runtime.Object{}...) + for _, c := range testCaseList { + err := client.Create(context.TODO(), c.secret) + if err != nil { + t.Errorf("failed to create object storage secret, due to %v", err) + } + path, err := getTLSSecretMountPath(client, c.storeConfig) + if path != c.expected { + t.Errorf("case (%v) output: (%v) is not the expected: (%v)", c.name, path, c.expected) + } + } +} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go index 3aba5c82e..d6557d4e2 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration.go @@ -12,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" @@ -27,7 +28,7 @@ func createOrUpdateObservabilityStorageVersionMigrationResource(client client.Cl mco *mcov1beta2.MultiClusterObservability) error { storageVersionMigrationName := storageVersionMigrationPrefix if mco != nil { - storageVersionMigrationName += mco.GetName() + storageVersionMigrationName += "-" + mco.GetName() } storageVersionMigration := &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ @@ -42,6 +43,10 @@ func createOrUpdateObservabilityStorageVersionMigrationResource(client client.Cl }, } + if err := controllerutil.SetControllerReference(mco, storageVersionMigration, scheme); err != nil { + log.Error(err, "Failed to set controller reference", "name", storageVersionMigrationName) + } + found := &migrationv1alpha1.StorageVersionMigration{} err := client.Get(context.TODO(), types.NamespacedName{Name: storageVersionMigrationName}, found) if err != nil && errors.IsNotFound(err) { @@ -71,26 +76,3 @@ func createOrUpdateObservabilityStorageVersionMigrationResource(client client.Cl log.Info("StorageVersionMigration already existed/unchanged", "name", storageVersionMigrationName) return nil } - -// cleanObservabilityStorageVersionMigrationResource delete the StorageVersionMigration source if found -func cleanObservabilityStorageVersionMigrationResource(client client.Client, mco *mcov1beta2.MultiClusterObservability) error { - storageVersionMigrationName := storageVersionMigrationPrefix - if mco != nil { - storageVersionMigrationName += mco.GetName() - } - found := 
&migrationv1alpha1.StorageVersionMigration{} - err := client.Get(context.TODO(), types.NamespacedName{Name: storageVersionMigrationName}, found) - if err != nil && errors.IsNotFound(err) { - log.Info("StorageVersionMigration doesn't exist", "name", storageVersionMigrationName) - } else if err != nil { - log.Error(err, "Failed to check StorageVersionMigration", "name", storageVersionMigrationName) - return err - } else { - err = client.Delete(context.TODO(), found) - if err != nil { - log.Error(err, "Failed to delete StorageVersionMigration", "name", storageVersionMigrationName) - return err - } - } - return nil -} diff --git a/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go index e8d3e0fbc..66fcc3ec8 100644 --- a/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go +++ b/operators/multiclusterobservability/controllers/multiclusterobservability/storageversionmigration_test.go @@ -40,7 +40,7 @@ func TestCreateOrUpdateObservabilityStorageVersionMigrationResource(t *testing.T } // Test scenario in which StorageVersionMigration updated by others - svmName := storageVersionMigrationPrefix + mco.GetName() + svmName := storageVersionMigrationPrefix + "-" + mco.GetName() svm := &migrationv1alpha1.StorageVersionMigration{ ObjectMeta: metav1.ObjectMeta{ Name: svmName, @@ -67,11 +67,6 @@ func TestCreateOrUpdateObservabilityStorageVersionMigrationResource(t *testing.T t.Fatalf("Failed to update StorageVersionMigration (%s)", svmName) } - err = cleanObservabilityStorageVersionMigrationResource(c, mco) - if err != nil { - t.Fatalf("Failed to clean the StorageVersionMigration") - } - // Test clean scenario in which StorageVersionMigration is already removed err = createOrUpdateObservabilityStorageVersionMigrationResource(c, s, mco) if err != nil { @@ -82,9 +77,4 @@ func TestCreateOrUpdateObservabilityStorageVersionMigrationResource(t *testing.T if err != nil { t.Fatalf("Failed to delete (%s): (%v)", svmName, err) } - - err = cleanObservabilityStorageVersionMigrationResource(c, mco) - if err != nil { - t.Fatalf("Failed to clean the StorageVersionMigration") - } } diff --git a/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go b/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go index 7ca9710d4..86e653f32 100644 --- a/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go +++ b/operators/multiclusterobservability/controllers/placementrule/endpoint_metrics_operator.go @@ -111,23 +111,10 @@ func updateRes(r *resource.Resource, // set images for components in managed clusters if r.GetKind() == "ConfigMap" && r.GetName() == operatorconfig.ImageConfigMap { images := obj.(*corev1.ConfigMap).Data - for key, _ := range images { - if key == operatorconfig.ConfigmapReloaderKey { - found, image := mcoconfig.ReplaceImage( - mco.Annotations, - mcoconfig.ConfigmapReloaderImgRepo+"/"+operatorconfig.ImageKeyNameMap[operatorconfig.ConfigmapReloaderKey], - key) - if found { - obj.(*corev1.ConfigMap).Data[key] = image - } - } else { - found, image := mcoconfig.ReplaceImage( - mco.Annotations, - mcoconfig.DefaultImgRepository+"/"+operatorconfig.ImageKeyNameMap[key], - key) - if found { - obj.(*corev1.ConfigMap).Data[key] = image - } + for key := range images { + found, image := 
mcoconfig.ReplaceImage(mco.Annotations, images[key], key) + if found { + obj.(*corev1.ConfigMap).Data[key] = image } } } @@ -159,21 +146,21 @@ func getImage(mco *mcov1beta2.MultiClusterObservability, return image } -func loadPromTemplates(mco *mcov1beta2.MultiClusterObservability) ( - []runtime.RawExtension, error) { - // load and render promTemplates - promTemplates, err := templates.GetOrLoadPrometheusTemplates(templatesutil.GetTemplateRenderer()) - if err != nil { - log.Error(err, "Failed to load templates") - return nil, err - } - rawExtensionList := []runtime.RawExtension{} - for _, r := range promTemplates { - obj, err := updateRes(r, mco) - if err != nil { - return nil, err - } - rawExtensionList = append(rawExtensionList, runtime.RawExtension{Object: obj}) - } - return rawExtensionList, nil -} +// func loadPromTemplates(mco *mcov1beta2.MultiClusterObservability) ( +// []runtime.RawExtension, error) { +// // load and render promTemplates +// promTemplates, err := templates.GetOrLoadPrometheusTemplates(templatesutil.GetTemplateRenderer()) +// if err != nil { +// log.Error(err, "Failed to load templates") +// return nil, err +// } +// rawExtensionList := []runtime.RawExtension{} +// for _, r := range promTemplates { +// obj, err := updateRes(r, mco) +// if err != nil { +// return nil, err +// } +// rawExtensionList = append(rawExtensionList, runtime.RawExtension{Object: obj}) +// } +// return rawExtensionList, nil +// } diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go index ea4e9af18..acb9d6cb9 100644 --- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret.go @@ -44,7 +44,8 @@ func generateHubInfoSecret(client client.Client, obsNamespace string, return nil, err } } else { - // for KinD support, the managedcluster and hub cluster are assumed in the same cluster, the observatorium-api will be accessed through k8s service FQDN + port + // for KinD support, the managedcluster and hub cluster are assumed in the same cluster, the observatorium-api + // will be accessed through k8s service FQDN + port obsApiRouteHost = config.GetOperandNamePrefix() + "observatorium-api" + "." + config.GetDefaultNamespace() + ".svc.cluster.local:8080" alertmanagerEndpoint = config.AlertmanagerServiceName + "." + config.GetDefaultNamespace() + ".svc.cluster.local:9095" var err error diff --git a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go index 32c472da5..685baf5ec 100644 --- a/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/hub_info_secret_test.go @@ -110,7 +110,7 @@ func TestNewSecret(t *testing.T) { initSchema(t) objs := []runtime.Object{newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestIngressController(), newTestRouteCASecret()} - c := fake.NewFakeClient(objs...) 
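The test hunks in this and the surrounding files replace the deprecated fake.NewFakeClient constructor with the controller-runtime client builder. A minimal sketch of the newer pattern follows, assuming the default client-go scheme; the object names and namespace are made up for illustration only.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// Seed the fake client with a pre-existing object instead of calling the
	// deprecated fake.NewFakeClient(objs...) constructor.
	seed := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "hub-info", Namespace: "test-ns"},
		Data:       map[string][]byte{"hub-info.yaml": []byte("endpoint: https://example")},
	}

	c := fake.NewClientBuilder().
		WithScheme(scheme.Scheme).
		WithRuntimeObjects(seed).
		Build()

	// The resulting client behaves like a regular client.Client in tests.
	got := &corev1.Secret{}
	err := c.Get(context.TODO(), types.NamespacedName{Name: "hub-info", Namespace: "test-ns"}, got)
	fmt.Println(err, string(got.Data["hub-info.yaml"]))
}
```

The builder form also makes it explicit which scheme the fake client validates against, which the older constructor left implicit.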
+ c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() hubInfo, err := generateHubInfoSecret(c, mcoNamespace, namespace, true) if err != nil { @@ -130,7 +130,7 @@ func TestNewBYOSecret(t *testing.T) { initSchema(t) objs := []runtime.Object{newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestAmRouteBYOCA(), newTestAmRouteBYOCert()} - c := fake.NewFakeClient(objs...) + c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() hubInfo, err := generateHubInfoSecret(c, mcoNamespace, namespace, true) if err != nil { diff --git a/operators/multiclusterobservability/controllers/placementrule/manifestwork.go b/operators/multiclusterobservability/controllers/placementrule/manifestwork.go index 6672ddb4e..961778f3b 100644 --- a/operators/multiclusterobservability/controllers/placementrule/manifestwork.go +++ b/operators/multiclusterobservability/controllers/placementrule/manifestwork.go @@ -50,23 +50,10 @@ var ( endpointMetricsOperatorDeploy *appsv1.Deployment imageListConfigMap *corev1.ConfigMap - rawExtensionList []runtime.RawExtension - promRawExtensionList []runtime.RawExtension + rawExtensionList []runtime.RawExtension + //promRawExtensionList []runtime.RawExtension ) -type MetricsAllowlist struct { - NameList []string `yaml:"names"` - MatchList []string `yaml:"matches"` - RenameMap map[string]string `yaml:"renames"` - RuleList []Rule `yaml:"rules"` -} - -// Rule is the struct for recording rules and alert rules -type Rule struct { - Record string `yaml:"record"` - Expr string `yaml:"expr"` -} - func deleteManifestWork(c client.Client, name string, namespace string) error { addon := &workv1.ManifestWork{ @@ -173,7 +160,7 @@ func createManifestwork(c client.Client, work *workv1.ManifestWork) error { if found.GetDeletionTimestamp() != nil { log.Info("Existing manifestwork is terminating, skip and reconcile later") - return errors.New("Existing manifestwork is terminating, skip and reconcile later") + return errors.New("existing manifestwork is terminating, skip and reconcile later") } manifests := work.Spec.Workload.Manifests @@ -373,7 +360,8 @@ func createManifestWorks(c client.Client, restMapper meta.RESTMapper, return err } -// generateAmAccessorTokenSecret generates the secret that contains the access_token for the Alertmanager in the Hub cluster +// generateAmAccessorTokenSecret generates the secret that contains the access_token +// for the Alertmanager in the Hub cluster func generateAmAccessorTokenSecret(client client.Client) (*corev1.Secret, error) { amAccessorSA := &corev1.ServiceAccount{} err := client.Get(context.TODO(), types.NamespacedName{Name: config.AlertmanagerAccessorSAName, @@ -392,8 +380,16 @@ func generateAmAccessorTokenSecret(client client.Client) (*corev1.Secret, error) } if tokenSrtName == "" { - log.Error(err, "no token secret for Alertmanager accessor serviceaccount", "name", config.AlertmanagerAccessorSAName) - return nil, fmt.Errorf("no token secret for Alertmanager accessor serviceaccount: %s", config.AlertmanagerAccessorSAName) + log.Error( + err, + "no token secret for Alertmanager accessor serviceaccount", + "name", + config.AlertmanagerAccessorSAName, + ) + return nil, fmt.Errorf( + "no token secret for Alertmanager accessor serviceaccount: %s", + config.AlertmanagerAccessorSAName, + ) } tokenSrt := &corev1.Secret{} @@ -489,20 +485,32 @@ func generateMetricsListCM(client client.Client) (*corev1.ConfigMap, error) { Data: map[string]string{}, } - allowlist, err := getAllowList(client, operatorconfig.AllowlistConfigMapName) + allowlist, 
ocp3Allowlist, err := getAllowList(client, operatorconfig.AllowlistConfigMapName) if err != nil { log.Error(err, "Failed to get metrics allowlist configmap "+operatorconfig.AllowlistConfigMapName) return nil, err } - customAllowlist, err := getAllowList(client, config.AllowlistCustomConfigMapName) + customAllowlist, _, err := getAllowList(client, config.AllowlistCustomConfigMapName) if err == nil { allowlist.NameList = mergeMetrics(allowlist.NameList, customAllowlist.NameList) allowlist.MatchList = mergeMetrics(allowlist.MatchList, customAllowlist.MatchList) - allowlist.RuleList = append(allowlist.RuleList, customAllowlist.RuleList...) + allowlist.CollectRuleGroupList = mergeCollectorRuleGroupList(allowlist.CollectRuleGroupList, customAllowlist.CollectRuleGroupList) + if customAllowlist.RecordingRuleList != nil { + allowlist.RecordingRuleList = append(allowlist.RecordingRuleList, customAllowlist.RecordingRuleList...) + } else { + //check if rules are specified for backward compatibility + allowlist.RecordingRuleList = append(allowlist.RecordingRuleList, customAllowlist.RuleList...) + } for k, v := range customAllowlist.RenameMap { allowlist.RenameMap[k] = v } + ocp3Allowlist.NameList = mergeMetrics(ocp3Allowlist.NameList, customAllowlist.NameList) + ocp3Allowlist.MatchList = mergeMetrics(ocp3Allowlist.MatchList, customAllowlist.MatchList) + ocp3Allowlist.RuleList = append(ocp3Allowlist.RuleList, customAllowlist.RuleList...) + for k, v := range customAllowlist.RenameMap { + ocp3Allowlist.RenameMap[k] = v + } } else { log.Info("There is no custom metrics allowlist configmap in the cluster") } @@ -513,10 +521,16 @@ func generateMetricsListCM(client client.Client) (*corev1.ConfigMap, error) { return nil, err } metricsAllowlist.Data["metrics_list.yaml"] = string(data) + data, err = yaml.Marshal(ocp3Allowlist) + if err != nil { + log.Error(err, "Failed to marshal allowlist data") + return nil, err + } + metricsAllowlist.Data["ocp311_metrics_list.yaml"] = string(data) return metricsAllowlist, nil } -func getAllowList(client client.Client, name string) (*MetricsAllowlist, error) { +func getAllowList(client client.Client, name string) (*operatorconfig.MetricsAllowlist, *operatorconfig.MetricsAllowlist, error) { found := &corev1.ConfigMap{} namespacedName := types.NamespacedName{ Name: name, @@ -524,15 +538,21 @@ func getAllowList(client client.Client, name string) (*MetricsAllowlist, error) } err := client.Get(context.TODO(), namespacedName, found) if err != nil { - return nil, err + return nil, nil, err } - allowlist := &MetricsAllowlist{} + allowlist := &operatorconfig.MetricsAllowlist{} err = yaml.Unmarshal([]byte(found.Data["metrics_list.yaml"]), allowlist) if err != nil { - log.Error(err, "Failed to unmarshal data in configmap "+name) - return nil, err + log.Error(err, "Failed to unmarshal metrics_list.yaml data in configmap "+name) + return nil, nil, err + } + ocp3Allowlist := &operatorconfig.MetricsAllowlist{} + err = yaml.Unmarshal([]byte(found.Data["ocp311_metrics_list.yaml"]), ocp3Allowlist) + if err != nil { + log.Error(err, "Failed to unmarshal ocp311_metrics_list data in configmap "+name) + return nil, nil, err } - return allowlist, nil + return allowlist, ocp3Allowlist, nil } func mergeMetrics(defaultAllowlist []string, customAllowlist []string) []string { @@ -563,6 +583,26 @@ func mergeMetrics(defaultAllowlist []string, customAllowlist []string) []string return mergedMetrics } +func mergeCollectorRuleGroupList(defaultCollectRuleGroupList []operatorconfig.CollectRuleGroup, 
customCollectRuleGroupList []operatorconfig.CollectRuleGroup) []operatorconfig.CollectRuleGroup { + deletedCollectRuleGroups := map[string]bool{} + for _, collectRuleGroup := range customCollectRuleGroupList { + if strings.HasPrefix(collectRuleGroup.Name, "-") { + deletedCollectRuleGroups[strings.TrimPrefix(collectRuleGroup.Name, "-")] = true + } + } + + mergedCollectRuleGroups := []operatorconfig.CollectRuleGroup{} + for _, collectRuleGroup := range defaultCollectRuleGroupList { + if !deletedCollectRuleGroups[collectRuleGroup.Name] { + mergedCollectRuleGroups = append(mergedCollectRuleGroups, collectRuleGroup) + } + } + + config.CollectRulesEnabled = len(mergedCollectRuleGroups) == 2 + + return mergedCollectRuleGroups +} + func getObservabilityAddon(c client.Client, namespace string, mco *mcov1beta2.MultiClusterObservability) (*mcov1beta1.ObservabilityAddon, error) { found := &mcov1beta1.ObservabilityAddon{} diff --git a/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go b/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go index 9dc957b80..b252e7aaf 100644 --- a/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/manifestwork_test.go @@ -102,7 +102,54 @@ func NewMetricsAllowListCM() *corev1.ConfigMap { - b renames: a: c - rules: + recording_rules: + - record: f + expr: g + collect_rules: + - name: keepGroup + annotations: + summary: + description: + selector: + matchExpressions: + - key: clusterType + operator: NotIn + values: ["SNO"] + rules: + - collect: c + annotations: + summary: + description: + expr: e + for: 2m + matches: + - __name__="foo" + - name: discardGroup + annotations: + summary: + description: + selector: + matchExpressions: + - key: clusterType + operator: In + values: ["SNO"] + rules: + - collect: d + annotations: + summary: + description: + expr: d + for: 2m + names: + - foobar_metric +`, + "ocp311_metrics_list.yaml": ` + names: + - a + - b + renames: + a: c + recording_rules: - record: f expr: g `}, @@ -124,6 +171,8 @@ func NewMetricsCustomAllowListCM() *corev1.ConfigMap { rules: - record: h expr: i + collect_rules: + - name: -discard `}, } } @@ -210,7 +259,7 @@ func TestManifestWork(t *testing.T) { newImageRegistry("image_registry", namespace, "registry_server", "custorm_pull_secret"), newPullSecret("custorm_pull_secret", namespace, []byte("custorm")), } - c := fake.NewFakeClient(objs...) 
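A hedged sketch of the deletion convention that the merge helpers above (mergeMetrics and mergeCollectorRuleGroupList) rely on: a custom allowlist entry prefixed with "-" removes the matching default entry instead of adding a new one. The helper below is a simplified illustration over plain strings, not the operator's actual allowlist types.

```go
package main

import (
	"fmt"
	"strings"
)

// mergeWithDeletes merges custom entries into defaults. A custom entry
// prefixed with "-" marks the matching default for removal; other custom
// entries are appended, with duplicates dropped.
func mergeWithDeletes(defaults, custom []string) []string {
	deleted := map[string]bool{}
	additions := []string{}
	for _, c := range custom {
		if strings.HasPrefix(c, "-") {
			deleted[strings.TrimPrefix(c, "-")] = true
		} else {
			additions = append(additions, c)
		}
	}

	merged := []string{}
	seen := map[string]bool{}
	for _, entry := range append(defaults, additions...) {
		if !deleted[entry] && !seen[entry] {
			merged = append(merged, entry)
			seen[entry] = true
		}
	}
	return merged
}

func main() {
	defaults := []string{"keepGroup", "discardGroup"}
	custom := []string{"-discardGroup", "extraGroup"}
	fmt.Println(mergeWithDeletes(defaults, custom)) // [keepGroup extraGroup]
}
```

This matches the test fixtures above, where the custom configmap's "- name: -discard" entry drops the default discardGroup while keepGroup survives the merge.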
+ c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() wd, err := os.Getwd() if err != nil { t.Fatalf("Failed to get work dir: (%v)", err) @@ -225,7 +274,7 @@ func TestManifestWork(t *testing.T) { } works, crdWork, _, err := generateGlobalManifestResources(c, newTestMCO()) if err != nil { - t.Fatalf("Failed to get global manifestwork resourc: (%v)", err) + t.Fatalf("Failed to get global manifestwork resource: (%v)", err) } t.Logf("work size is %d", len(works)) if hubInfoSecret, err = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, true); err != nil { @@ -253,7 +302,7 @@ func TestManifestWork(t *testing.T) { pullSecret = nil works, crdWork, _, err = generateGlobalManifestResources(c, newTestMCO()) if err != nil { - t.Fatalf("Failed to get global manifestwork resourc: (%v)", err) + t.Fatalf("Failed to get global manifestwork resource: (%v)", err) } err = createManifestWorks(c, nil, namespace, clusterName, newTestMCO(), works, crdWork, endpointMetricsOperatorDeploy, hubInfoSecret, false) if err != nil { @@ -291,7 +340,7 @@ func TestManifestWork(t *testing.T) { works, crdWork, _, err = generateGlobalManifestResources(c, newTestMCO()) if err != nil { - t.Fatalf("Failed to get global manifestwork resourc: (%v)", err) + t.Fatalf("Failed to get global manifestwork resource: (%v)", err) } if hubInfoSecret, err = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, true); err != nil { diff --git a/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go b/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go index 3d5ce32cb..5a1155f35 100644 --- a/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/obsaddon_test.go @@ -20,7 +20,7 @@ func TestObsAddonCR(t *testing.T) { initSchema(t) objs := []runtime.Object{newTestObsApiRoute()} - c := fake.NewFakeClient(objs...) + c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() err := createObsAddon(c, namespace) if err != nil { @@ -79,7 +79,7 @@ func TestStaleObsAddonCR(t *testing.T) { initSchema(t) objs := []runtime.Object{newTestObsApiRoute()} - c := fake.NewFakeClient(objs...) 
+ c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() err := createObsAddon(c, namespace) if err != nil { diff --git a/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go index 28939a0ba..c7c1b1154 100644 --- a/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go +++ b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller.go @@ -38,7 +38,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" commonutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" - mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workv1 "open-cluster-management.io/api/work/v1" @@ -52,8 +52,8 @@ const ( ) var ( - log = logf.Log.WithName("controller_placementrule") - watchNamespace = config.GetDefaultNamespace() + log = logf.Log.WithName("controller_placementrule") + //watchNamespace = config.GetDefaultNamespace() isCRoleCreated = false isClusterManagementAddonCreated = false isplacementControllerRunnning = false @@ -114,8 +114,13 @@ func (r *PlacementRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } + if !deleteAll && !mco.Spec.ObservabilityAddonSpec.EnableMetrics { + reqLogger.Info("EnableMetrics is set to false. Delete Observability addons") + deleteAll = true + } + // check if the MCH CRD exists - mchCrdExists, _ := r.CRDMap[config.MCHCrdName] + mchCrdExists := r.CRDMap[config.MCHCrdName] // requeue after 10 seconds if the mch crd exists and image image manifests map is empty if mchCrdExists && len(config.GetImageManifests()) == 0 { // if the mch CR is not ready, then requeue the request after 10s @@ -127,7 +132,8 @@ func (r *PlacementRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reques var err error managedClusterObsCert, err = generateObservabilityServerCACerts(r.Client) if err != nil && k8serrors.IsNotFound(err) { - // if the servser certificate for managedcluster is not ready, then requeue the request after 10s to avoid useless reconcile loop. + // if the servser certificate for managedcluster is not ready, then + // requeue the request after 10s to avoid useless reconcile loop. 
return ctrl.Result{RequeueAfter: 10 * time.Second}, nil } } @@ -148,7 +154,14 @@ func (r *PlacementRuleReconciler) Reconcile(ctx context.Context, req ctrl.Reques } if !deleteAll { - res, err := createAllRelatedRes(r.Client, r.RESTMapper, req, mco, obsAddonList, r.CRDMap[config.IngressControllerCRD]) + res, err := createAllRelatedRes( + r.Client, + r.RESTMapper, + req, + mco, + obsAddonList, + r.CRDMap[config.IngressControllerCRD], + ) if err != nil { return res, err } @@ -316,7 +329,15 @@ func createAllRelatedRes( request.Namespace == config.GetDefaultNamespace() || (request.Namespace == "" && request.Name == managedCluster) || request.Namespace == managedCluster { - log.Info("Monitoring operator should be installed in cluster", "cluster_name", managedCluster, "request.name", request.Name, "request.namespace", request.Namespace) + log.Info( + "Monitoring operator should be installed in cluster", + "cluster_name", + managedCluster, + "request.name", + request.Name, + "request.namespace", + request.Namespace, + ) if openshiftVersion == "3" { err = createManagedClusterRes(c, restMapper, mco, managedCluster, managedCluster, @@ -352,7 +373,7 @@ func createAllRelatedRes( } if failedCreateManagedClusterRes || failedDeleteOba { - return ctrl.Result{}, errors.New("Failed to create managedcluster resources or" + + return ctrl.Result{}, errors.New("failed to create managedcluster resources or" + " failed to delete observabilityaddon, skip and reconcile later") } @@ -461,7 +482,7 @@ func updateManagedClusterList(obj client.Object) { // SetupWithManager sets up the controller with the Manager. func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { c := mgr.GetClient() - ingressCtlCrdExists, _ := r.CRDMap[config.IngressControllerCRD] + ingressCtlCrdExists := r.CRDMap[config.IngressControllerCRD] clusterPred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { log.Info("CreateFunc", "managedCluster", e.Object.GetName()) @@ -516,7 +537,13 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { DeleteFunc: func(e event.DeleteEvent) bool { if e.Object.GetName() == obsAddonName && e.Object.GetLabels()[ownerLabelKey] == ownerLabelValue { - log.Info("DeleteFunc", "obsAddonNamespace", e.Object.GetNamespace(), "obsAddonName", e.Object.GetName()) + log.Info( + "DeleteFunc", + "obsAddonNamespace", + e.Object.GetNamespace(), + "obsAddonName", + e.Object.GetName(), + ) /* #nosec */ removePostponeDeleteAnnotationForManifestwork(c, e.Object.GetNamespace()) return true @@ -528,7 +555,10 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { mcoPred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { // generate the image pull secret - pullSecret, _ = generatePullSecret(c, config.GetImagePullSecret(e.Object.(*mcov1beta2.MultiClusterObservability).Spec)) + pullSecret, _ = generatePullSecret( + c, + config.GetImagePullSecret(e.Object.(*mcov1beta2.MultiClusterObservability).Spec), + ) return true }, UpdateFunc: func(e event.UpdateEvent) bool { @@ -538,7 +568,10 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { e.ObjectOld.(*mcov1beta2.MultiClusterObservability).Spec.ObservabilityAddonSpec) { if e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec.ImagePullSecret != e.ObjectOld.(*mcov1beta2.MultiClusterObservability).Spec.ImagePullSecret { // regenerate the image pull secret - pullSecret, _ = generatePullSecret(c, 
config.GetImagePullSecret(e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec)) + pullSecret, _ = generatePullSecret( + c, + config.GetImagePullSecret(e.ObjectNew.(*mcov1beta2.MultiClusterObservability).Spec), + ) } return true } @@ -615,7 +648,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { if e.Object.GetName() == config.OpenshiftIngressOperatorCRName && e.Object.GetNamespace() == config.OpenshiftIngressOperatorNamespace { // generate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -625,7 +663,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && e.ObjectNew.GetNamespace() == config.OpenshiftIngressOperatorNamespace { // regenerate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -634,7 +677,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { if e.Object.GetName() == config.OpenshiftIngressOperatorCRName && e.Object.GetNamespace() == config.OpenshiftIngressOperatorNamespace { // regenerate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -647,7 +695,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || e.Object.GetName() == config.AlertmanagerRouteBYOCERTName) { // generate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -658,7 +711,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { (e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCAName || e.ObjectNew.GetName() == config.AlertmanagerRouteBYOCERTName) { // regenerate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -668,7 +726,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { (e.Object.GetName() == config.AlertmanagerRouteBYOCAName || e.Object.GetName() == config.AlertmanagerRouteBYOCERTName) { // regenerate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -682,7 +745,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { (e.Object.GetNamespace() == 
config.OpenshiftIngressNamespace && e.Object.GetName() == config.OpenshiftIngressDefaultCertName) { // generate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -694,7 +762,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { e.ObjectNew.GetName() == config.OpenshiftIngressDefaultCertName)) && e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() { // regenerate the hubInfo secret - hubInfoSecret, _ = generateHubInfoSecret(c, config.GetDefaultNamespace(), spokeNameSpace, ingressCtlCrdExists) + hubInfoSecret, _ = generateHubInfoSecret( + c, + config.GetDefaultNamespace(), + spokeNameSpace, + ingressCtlCrdExists, + ) return true } return false @@ -742,6 +815,7 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { For(&clusterv1.ManagedCluster{}, builder.WithPredicates(clusterPred)). // secondary watch for observabilityaddon Watches(&source.Kind{Type: &mcov1beta1.ObservabilityAddon{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(obsAddonPred)). + // secondary watch for MCO Watches(&source.Kind{Type: &mcov1beta2.MultiClusterObservability{}}, handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { return []reconcile.Request{ @@ -750,10 +824,13 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { }}, } }), builder.WithPredicates(mcoPred)). + // secondary watch for custom allowlist configmap Watches(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(customAllowlistPred)). + // secondary watch for certificate secrets Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(certSecretPred)). 
+ // secondary watch for alertmanager accessor serviceaccount Watches(&source.Kind{Type: &corev1.ServiceAccount{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(amAccessorSAPred)) @@ -778,19 +855,27 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { } // secondary watch for manifestwork - ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &workv1.ManifestWork{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(workPred)) + ctrBuilder = ctrBuilder.Watches( + &source.Kind{Type: &workv1.ManifestWork{}}, + &handler.EnqueueRequestForObject{}, + builder.WithPredicates(workPred), + ) } - mchGroupKind := schema.GroupKind{Group: mchv1.SchemeGroupVersion.Group, Kind: "MultiClusterHub"} - if _, err := r.RESTMapper.RESTMapping(mchGroupKind, mchv1.SchemeGroupVersion.Version); err == nil { + mchGroupKind := schema.GroupKind{Group: mchv1.GroupVersion.Group, Kind: "MultiClusterHub"} + if _, err := r.RESTMapper.RESTMapping(mchGroupKind, mchv1.GroupVersion.Version); err == nil { mchPred := predicate.Funcs{ CreateFunc: func(e event.CreateEvent) bool { // this is for operator restart, the mch CREATE event will be caught and the mch should be ready if e.Object.GetNamespace() == config.GetMCONamespace() && e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && e.Object.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion { - // only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully - ok, err := config.ReadImageManifestConfigMap(c, e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion) + // only read the image manifests configmap and enqueue the request when the MCH is + // installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap( + c, + e.Object.(*mchv1.MultiClusterHub).Status.CurrentVersion, + ) if err != nil { return false } @@ -803,8 +888,12 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() && e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion != "" && e.ObjectNew.(*mchv1.MultiClusterHub).Status.DesiredVersion == e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion { - /// only read the image manifests configmap and enqueue the request when the MCH is installed/upgraded successfully - ok, err := config.ReadImageManifestConfigMap(c, e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion) + // / only read the image manifests configmap and enqueue the request when the MCH is + // installed/upgraded successfully + ok, err := config.ReadImageManifestConfigMap( + c, + e.ObjectNew.(*mchv1.MultiClusterHub).Status.CurrentVersion, + ) if err != nil { return false } @@ -820,23 +909,29 @@ func (r *PlacementRuleReconciler) SetupWithManager(mgr ctrl.Manager) error { if ingressCtlCrdExists { // secondary watch for default ingresscontroller ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &operatorv1.IngressController{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(ingressControllerPred)). + // secondary watch for alertmanager route byo cert secrets Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(amRouterCertSecretPred)). 
+ // secondary watch for openshift route ca secret Watches(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(routeCASecretPred)) } - mchCrdExists, _ := r.CRDMap[config.MCHCrdName] + mchCrdExists := r.CRDMap[config.MCHCrdName] if mchCrdExists { // secondary watch for MCH - ctrBuilder = ctrBuilder.Watches(&source.Kind{Type: &mchv1.MultiClusterHub{}}, handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { - return []reconcile.Request{ - {NamespacedName: types.NamespacedName{ - Name: config.MCHUpdatedRequestName, - Namespace: obj.GetNamespace(), - }}, - } - }), builder.WithPredicates(mchPred)) + ctrBuilder = ctrBuilder.Watches( + &source.Kind{Type: &mchv1.MultiClusterHub{}}, + handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { + return []reconcile.Request{ + {NamespacedName: types.NamespacedName{ + Name: config.MCHUpdatedRequestName, + Namespace: obj.GetNamespace(), + }}, + } + }), + builder.WithPredicates(mchPred), + ) } } diff --git a/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go index 907bf9750..9403b5aba 100644 --- a/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/placementrule_controller_test.go @@ -28,7 +28,7 @@ import ( mcov1beta2 "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/api/v1beta2" "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/rendering/templates" - mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workv1 "open-cluster-management.io/api/work/v1" @@ -128,7 +128,7 @@ func TestObservabilityAddonController(t *testing.T) { } objs := []runtime.Object{mco, pull, newTestObsApiRoute(), newTestAlertmanagerRoute(), newTestIngressController(), newTestRouteCASecret(), newCASecret(), newCertSecret(mcoNamespace), NewMetricsAllowListCM(), NewAmAccessorSA(), NewAmAccessorTokenSecret(), newManagedClusterAddon(), deprecatedRole} - c := fake.NewFakeClient(objs...) 
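The SetupWithManager changes above add several secondary watches, each guarded by a predicate.Funcs that narrows events to one well-known object. A minimal sketch of that wiring, written against the same controller-runtime builder API the patch uses; the watched ConfigMap name and the Secret used as the primary resource are illustrative choices, not the operator's actual wiring.

```go
package main

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// setupSecondaryWatch wires a secondary watch in the style used above:
// a predicate filters events down to one named object, and matching events
// are enqueued for the reconciler alongside the primary resource.
func setupSecondaryWatch(mgr ctrl.Manager, r reconcile.Reconciler) error {
	const watchedName = "example-custom-allowlist" // placeholder name

	pred := predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return e.Object.GetName() == watchedName
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			// Ignore resyncs that did not change the object.
			return e.ObjectNew.GetName() == watchedName &&
				e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion()
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			return e.Object.GetName() == watchedName
		},
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Secret{}).
		Watches(&source.Kind{Type: &corev1.ConfigMap{}},
			&handler.EnqueueRequestForObject{},
			builder.WithPredicates(pred)).
		Complete(r)
}

func main() {}
```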
+ c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() r := &PlacementRuleReconciler{Client: c, Scheme: s, CRDMap: map[string]bool{config.IngressControllerCRD: true}} wd, err := os.Getwd() @@ -140,9 +140,11 @@ func TestObservabilityAddonController(t *testing.T) { manifestsPath := path.Join(wd, "../../manifests") os.Setenv("TEMPLATES_PATH", testManifestsPath) templates.ResetTemplates() - err = os.Symlink(manifestsPath, testManifestsPath) - if err != nil { - t.Fatalf("Failed to create symbollink(%s) to(%s) for the test manifests: (%v)", testManifestsPath, manifestsPath, err) + if _, err := os.Stat(testManifestsPath); err == os.ErrNotExist { + err = os.Symlink(manifestsPath, testManifestsPath) + if err != nil { + t.Fatalf("Failed to create symbollink(%s) to(%s) for the test manifests: (%v)", testManifestsPath, manifestsPath, err) + } } req := ctrl.Request{ @@ -312,8 +314,11 @@ func TestObservabilityAddonController(t *testing.T) { } // remove the testing manifests directory - if err = os.Remove(testManifestsPath); err != nil { - t.Fatalf("Failed to delete symbollink(%s) for the test manifests: (%v)", testManifestsPath, err) + _, err = os.Stat(testManifestsPath) + if err == nil { + if err = os.Remove(testManifestsPath); err != nil { + t.Fatalf("Failed to delete symbollink(%s) for the test manifests: (%v)", testManifestsPath, err) + } } os.Remove(path.Join(wd, "../../placementrule-tests")) } diff --git a/operators/multiclusterobservability/controllers/placementrule/role.go b/operators/multiclusterobservability/controllers/placementrule/role.go index ee5aacc15..f40ff5d67 100644 --- a/operators/multiclusterobservability/controllers/placementrule/role.go +++ b/operators/multiclusterobservability/controllers/placementrule/role.go @@ -133,7 +133,11 @@ func createClusterRoleBinding(c client.Client, namespace string, name string) er return nil } - log.Info("clusterrolebinding endpoint-observability-mco-rolebinding already existed/unchanged", "namespace", namespace) + log.Info( + "clusterrolebinding endpoint-observability-mco-rolebinding already existed/unchanged", + "namespace", + namespace, + ) return nil } @@ -268,7 +272,12 @@ func createResourceRoleBinding(c client.Client, namespace string, name string) e log.Info("Creating endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) err = c.Create(context.TODO(), rb) if err != nil { - log.Error(err, "Failed to create endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) + log.Error( + err, + "Failed to create endpoint-observability-res-rolebinding rolebinding", + "namespace", + namespace, + ) return err } return nil @@ -282,7 +291,12 @@ func createResourceRoleBinding(c client.Client, namespace string, name string) e rb.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion err = c.Update(context.TODO(), rb) if err != nil { - log.Error(err, "Failed to update endpoint-observability-res-rolebinding rolebinding", "namespace", namespace) + log.Error( + err, + "Failed to update endpoint-observability-res-rolebinding rolebinding", + "namespace", + namespace, + ) return err } return nil @@ -325,7 +339,7 @@ func deleteResourceRole(c client.Client) error { func deleteRolebindings(c client.Client, namespace string) error { crb := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: namespace + "-" + resRoleBindingName, + Name: namespace + "-" + mcoRoleBindingName, }, } err := c.Delete(context.TODO(), crb) diff --git 
a/operators/multiclusterobservability/controllers/placementrule/role_test.go b/operators/multiclusterobservability/controllers/placementrule/role_test.go index c1aa77084..d9b760c20 100644 --- a/operators/multiclusterobservability/controllers/placementrule/role_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/role_test.go @@ -18,11 +18,11 @@ import ( "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" ) -const ( - secretName = "test-secret" - token = "test-token" - ca = "test-ca" -) +// const ( +// secretName = "test-secret" +// token = "test-token" +// ca = "test-ca" +// ) func TestCreateClusterRole(t *testing.T) { role := &rbacv1.ClusterRole{ @@ -60,7 +60,7 @@ func TestCreateClusterRole(t *testing.T) { }, } objs := []runtime.Object{role} - c := fake.NewFakeClient(objs...) + c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() err := createClusterRole(c) if err != nil { t.Fatalf("createRole: (%v)", err) @@ -97,7 +97,7 @@ func TestCreateClusterRoleBinding(t *testing.T) { }, } objs := []runtime.Object{rb} - c := fake.NewFakeClient(objs...) + c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() err := createClusterRoleBinding(c, namespace, namespace) if err != nil { t.Fatalf("createRoleBinding: (%v)", err) @@ -114,7 +114,7 @@ func TestCreateClusterRoleBinding(t *testing.T) { } func TestCreateRole(t *testing.T) { - c := fake.NewFakeClient() + c := fake.NewClientBuilder().Build() err := createResourceRole(c) if err != nil { t.Fatalf("createRole: (%v)", err) @@ -154,7 +154,7 @@ func TestCreateRole(t *testing.T) { }, } objs := []runtime.Object{role} - c = fake.NewFakeClient(objs...) + c = fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() err = createResourceRole(c) if err != nil { t.Fatalf("createRole: (%v)", err) @@ -170,7 +170,7 @@ func TestCreateRole(t *testing.T) { } func TestCreateRoleBinding(t *testing.T) { - c := fake.NewFakeClient() + c := fake.NewClientBuilder().Build() err := createResourceRoleBinding(c, namespace, namespace) if err != nil { t.Fatalf("createRole: (%v)", err) @@ -207,7 +207,7 @@ func TestCreateRoleBinding(t *testing.T) { }, } objs := []runtime.Object{rb} - c = fake.NewFakeClient(objs...) 
+ c = fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() err = createResourceRoleBinding(c, namespace, namespace) if err != nil { t.Fatalf("createRoleBinding: (%v)", err) diff --git a/operators/multiclusterobservability/controllers/placementrule/status.go b/operators/multiclusterobservability/controllers/placementrule/status.go index d0a3bd53d..7d41222f0 100644 --- a/operators/multiclusterobservability/controllers/placementrule/status.go +++ b/operators/multiclusterobservability/controllers/placementrule/status.go @@ -61,7 +61,12 @@ func updateAddonStatus(c client.Client, addonList mcov1beta1.ObservabilityAddonL managedclusteraddon.Status.Conditions = conditions err = c.Status().Update(context.TODO(), managedclusteraddon) if err != nil { - log.Error(err, "Failed to update status for managedclusteraddon", "namespace", addon.ObjectMeta.Namespace) + log.Error( + err, + "Failed to update status for managedclusteraddon", + "namespace", + addon.ObjectMeta.Namespace, + ) return err } log.Info("Updated status for managedclusteraddon", "namespace", addon.ObjectMeta.Namespace) diff --git a/operators/multiclusterobservability/controllers/placementrule/status_test.go b/operators/multiclusterobservability/controllers/placementrule/status_test.go index 4d17cbfa1..4db113e0b 100644 --- a/operators/multiclusterobservability/controllers/placementrule/status_test.go +++ b/operators/multiclusterobservability/controllers/placementrule/status_test.go @@ -26,7 +26,7 @@ func TestUpdateAddonStatus(t *testing.T) { Status: addonv1alpha1.ManagedClusterAddOnStatus{}, } objs := []runtime.Object{maddon} - c := fake.NewFakeClient(objs...) + c := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build() addonList := &mcov1beta1.ObservabilityAddonList{ Items: []mcov1beta1.ObservabilityAddon{ diff --git a/operators/multiclusterobservability/main.go b/operators/multiclusterobservability/main.go index 4d6f82d33..2174bfd2b 100644 --- a/operators/multiclusterobservability/main.go +++ b/operators/multiclusterobservability/main.go @@ -32,6 +32,7 @@ import ( ocinfrav1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" + prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -53,7 +54,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/util" "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/webhook" operatorsutil "github.com/stolostron/multicluster-observability-operator/operators/pkg/util" - mchv1 "github.com/stolostron/multiclusterhub-operator/pkg/apis/operator/v1" + mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1" observatoriumAPIs "github.com/stolostron/observatorium-operator/api/v1alpha1" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" @@ -75,6 +76,7 @@ func init() { utilruntime.Must(observabilityv1beta1.AddToScheme(scheme)) utilruntime.Must(observabilityv1beta2.AddToScheme(scheme)) utilruntime.Must(observatoriumAPIs.AddToScheme(scheme)) + utilruntime.Must(prometheusv1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -197,17 +199,23 @@ func main() { if ingressCtlCrdExists { gvkLabelsMap[operatorv1.SchemeGroupVersion.WithKind("IngressController")] = []filteredcache.Selector{ - {FieldSelector: 
fmt.Sprintf("metadata.namespace==%s,metadata.name==%s", config.OpenshiftIngressOperatorNamespace, config.OpenshiftIngressOperatorCRName)}, + { + FieldSelector: fmt.Sprintf( + "metadata.namespace==%s,metadata.name==%s", + config.OpenshiftIngressOperatorNamespace, + config.OpenshiftIngressOperatorCRName, + ), + }, } } if mchCrdExists { - gvkLabelsMap[mchv1.SchemeGroupVersion.WithKind("MultiClusterHub")] = []filteredcache.Selector{ + gvkLabelsMap[mchv1.GroupVersion.WithKind("MultiClusterHub")] = []filteredcache.Selector{ {FieldSelector: fmt.Sprintf("metadata.namespace==%s", mcoNamespace)}, } } - // The following RBAC resources will not be watched by MCO, the selector will not impact the mco behaviour, which means - // MCO will fetch kube-apiserver for the correspoding resource if the resource can't be found in the cache. + // The following RBAC resources will not be watched by MCO, the selector will not impact the mco behaviour, which + // means MCO will fetch kube-apiserver for the corresponding resource if the resource can't be found in the cache. // Adding selector will reduce the cache size when the managedcluster scale. gvkLabelsMap[rbacv1.SchemeGroupVersion.WithKind("ClusterRole")] = []filteredcache.Selector{ {LabelSelector: "owner==multicluster-observability-operator"}, @@ -242,7 +250,12 @@ func main() { } if err = util.UpdateCRDWebhookNS(crdClient, mcoNamespace, config.MCOCrdName); err != nil { - setupLog.Error(err, "unable to update webhook service namespace in MCO CRD", "controller", "MultiClusterObservability") + setupLog.Error( + err, + "unable to update webhook service namespace in MCO CRD", + "controller", + "MultiClusterObservability", + ) } svmCrdExists, err := util.CheckCRDExist(crdClient, config.StorageVersionMigrationCrdName) diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml index 8c96b7380..376cd8f54 100644 --- a/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml +++ b/operators/multiclusterobservability/manifests/base/alertmanager/alertmanager-statefulset.yaml @@ -112,7 +112,7 @@ spec: - --skip-provider-button=true - --openshift-ca=/etc/pki/tls/cert.pem - --openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - image: quay.io/stolostron/origin-oauth-proxy:2.0.11-SNAPSHOT-2021-04-29-18-29-17 + image: quay.io/stolostron/origin-oauth-proxy:4.5 imagePullPolicy: IfNotPresent name: alertmanager-proxy ports: diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml index 392b675f3..b3a2f4445 100644 --- a/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml +++ b/operators/multiclusterobservability/manifests/base/alertmanager/kustomization.yaml @@ -12,3 +12,4 @@ resources: - alertmanager-accessor-clusterrole.yaml - alertmanager-accessor-clusterrolebinding.yaml - alertmanager-accessor-serviceaccount.yaml +- prometheusrule.yaml diff --git a/operators/multiclusterobservability/manifests/base/alertmanager/prometheusrule.yaml b/operators/multiclusterobservability/manifests/base/alertmanager/prometheusrule.yaml new file mode 100644 index 000000000..0b65a7a40 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/alertmanager/prometheusrule.yaml @@ -0,0 +1,18 @@ +apiVersion: monitoring.coreos.com/v1 +kind:
PrometheusRule +metadata: + annotations: + update-namespace: 'false' + name: acm-observability-alert-rules + namespace: openshift-monitoring +spec: + groups: + - name: observability.rules + rules: + - alert: ACMRemoteWriteError + annotations: + summary: "Error in remote write." + description: "There are errors when sending requests to remote write endpoint: {{ $labels.name }}" + expr: increase(acm_remote_write_requests_total{code!~"2.*"}[5m]) > 10 + labels: + severity: critical diff --git a/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml b/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml index e2a1de4af..52ceb6179 100644 --- a/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml +++ b/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml @@ -146,7 +146,7 @@ data: namespace:kube_pod_container_resource_requests_cpu_cores:sum: namespace_cpu:kube_pod_container_resource_requests:sum node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate etcd_mvcc_db_total_size_in_bytes: etcd_debugging_mvcc_db_total_size_in_bytes - rules: + recording_rules: - record: apiserver_request_duration_seconds:histogram_quantile_99 expr: histogram_quantile(0.99,sum(rate(apiserver_request_duration_seconds_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (le)) - record: apiserver_request_duration_seconds:histogram_quantile_99:instance @@ -167,3 +167,233 @@ data: expr: sum(sum(sum(kube_pod_container_resource_requests{resource=\"memory\"}) by (pod,namespace,container) * on(pod,namespace) group_left(phase) max(kube_pod_status_phase{phase=~\"Running|Pending|Unknown\"} >0) by (pod,namespace,phase)) by (pod,namespace,phase)) - record: sli:apiserver_request_duration_seconds:trend:1m expr: sum(increase(apiserver_request_duration_seconds_bucket{job=\"apiserver\",service=\"kubernetes\",le=\"1\",verb=~\"POST|PUT|DELETE|PATCH\"}[1m])) / sum(increase(apiserver_request_duration_seconds_count{job=\"apiserver\",service=\"kubernetes\",verb=~\"POST|PUT|DELETE|PATCH\"}[1m])) + - record: container_memory_rss:sum + expr: sum(container_memory_rss{endpoint=\"https-metrics\"}) by (container, namespace) + - record: kube_pod_container_resource_limits:sum + expr: sum(kube_pod_container_resource_limits{endpoint=\"https-main\"}) by (resource, namespace) + - record: kube_pod_container_resource_requests:sum + expr: sum(kube_pod_container_resource_requests{endpoint=\"https-main\", container!=\"\"}) by (resource, namespace) + - record: namespace_workload_pod:kube_pod_owner:relabel:avg + expr: count(avg(namespace_workload_pod:kube_pod_owner:relabel{pod!=\"\"}) by (workload, namespace)) by (namespace) + - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum + expr: sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{container!=\"\"}) by (namespace) + collect_rules: + - name: NamespaceResourceUsage + annotations: + summary: + description: + selector: + matchExpressions: + - key: clusterType + operator: NotIn + values: ["SNO"] + rules: + - collect: NamespaceHighCPUUsage + annotations: + summary: + description: + expr: namespace:container_cpu_usage:sum/min(sum(kube_resourcequota{resource=\"limits.cpu\", type=\"hard\"}) by (namespace, resourcequota)) by (namespace) * 100 > 70 + for: 2m + matches: + - __name__="container_cpu_cfs_periods_total",namespace="{{ $labels.namespace }}" + - 
__name__="container_cpu_cfs_throttled_periods_total",namespace="{{ $labels.namespace }}" + - __name__="kube_pod_container_resource_limits",namespace="{{ $labels.namespace }}" + - __name__="kube_pod_container_resource_requests",namespace="{{ $labels.namespace }}" + - __name__="kube_resourcequota",namespace="{{ $labels.namespace }}" + - __name__="mixin_pod_workload",namespace="{{ $labels.namespace }}" + - __name__="namespace_workload_pod:kube_pod_owner:relabel",namespace="{{ $labels.namespace }}" + - __name__="node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate",namespace="{{ $labels.namespace }}" + - __name__="node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate",namespace="{{ $labels.namespace }}" + - collect: NamespaceHighMemoryUsage + annotations: + summary: + description: + expr: sum(container_memory_working_set_bytes{container!=\"\"}) by (namespace)/min(sum(kube_resourcequota{resource=\"limits.memory\", type=\"hard\"}) by (namespace, resourcequota)) by (namespace) * 100 > 70 + for: 2m + matches: + - __name__="container_memory_cache",namespace="{{ $labels.namespace }}" + - __name__="container_memory_rss",namespace="{{ $labels.namespace }}" + - __name__="container_memory_swap",namespace="{{ $labels.namespace }}" + - __name__="container_memory_working_set_bytes",namespace="{{ $labels.namespace }}" + - __name__="kube_pod_container_resource_limits",namespace="{{ $labels.namespace }}" + - __name__="kube_pod_container_resource_requests",namespace="{{ $labels.namespace }}" + - __name__="kube_resourcequota",namespace="{{ $labels.namespace }}" + - __name__="mixin_pod_workload",namespace="{{ $labels.namespace }}" + - __name__="namespace_workload_pod:kube_pod_owner:relabel",namespace="{{ $labels.namespace }}" + + - name: SNOResourceUsage + annotations: + summary: + description: + selector: + matchExpressions: + - key: clusterType + operator: In + values: ["SNO"] + rules: + - collect: SNOHighCPUUsage + annotations: + summary: + description: + expr: cluster:container_cpu_usage:ratio * 100 > 70 + for: 2m + names: + - container_cpu_cfs_periods_total + - container_cpu_cfs_throttled_periods_total + - kube_pod_container_resource_limits + - kube_pod_container_resource_requests + - kube_resourcequota + - mixin_pod_workload + - namespace_workload_pod:kube_pod_owner:relabel + - node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate + - node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + - collect: SNOHighMemoryUsage + annotations: + summary: + description: + expr: sum(container_memory_working_set_bytes{container!=\"\"})/sum(machine_memory_bytes) by() * 100 > 70 + for: 2m + names: + - container_memory_cache + - container_memory_rss + - container_memory_swap + - container_memory_working_set_bytes + - kube_pod_container_resource_limits + - kube_pod_container_resource_requests + - kube_resourcequota + - mixin_pod_workload + - namespace_workload_pod:kube_pod_owner:relabel + + ocp311_metrics_list.yaml: | + names: + - :node_memory_MemAvailable_bytes:sum + - container_cpu_cfs_periods_total + - container_cpu_cfs_throttled_periods_total + - etcd_debugging_mvcc_db_total_size_in_bytes + - etcd_disk_backend_commit_duration_seconds_bucket + - etcd_disk_wal_fsync_duration_seconds_bucket + - etcd_network_client_grpc_received_bytes_total + - etcd_network_client_grpc_sent_bytes_total + - etcd_network_peer_received_bytes_total + - etcd_network_peer_sent_bytes_total + - etcd_server_has_leader + - etcd_server_leader_changes_seen_total + - 
etcd_server_proposals_failed_total + - etcd_server_proposals_pending + - etcd_server_proposals_committed_total + - etcd_server_proposals_applied_total + - grpc_server_started_total + - instance:node_cpu_utilisation:rate1m + - instance:node_load1_per_cpu:ratio + - instance:node_memory_utilisation:ratio + - instance:node_network_receive_bytes_excluding_lo:rate1m + - instance:node_network_receive_drop_excluding_lo:rate1m + - instance:node_network_transmit_bytes_excluding_lo:rate1m + - instance:node_network_transmit_drop_excluding_lo:rate1m + - instance:node_num_cpu:sum + - instance:node_vmstat_pgmajfault:rate1m + - instance_device:node_disk_io_time_seconds:rate1m + - instance_device:node_disk_io_time_weighted_seconds:rate1m + - kube_node_status_allocatable + - kube_node_status_allocatable_cpu_cores + - kube_node_status_allocatable_memory_bytes + - kube_node_status_capacity_cpu_cores + - kube_node_status_condition + - kube_pod_container_resource_limits + - kube_pod_container_resource_limits_cpu_cores + - kube_pod_container_resource_limits_memory_bytes + - kube_pod_container_resource_requests + - kube_pod_container_resource_requests_cpu_cores + - kube_pod_container_resource_requests_memory_bytes + - kube_pod_info + - kube_pod_owner + - kube_resourcequota + - machine_cpu_cores + - machine_memory_bytes + - mixin_pod_workload + - namespace_workload_pod:kube_pod_owner:relabel + - node_cpu_seconds_total + - node_filesystem_avail_bytes + - node_filesystem_size_bytes + - node_memory_MemAvailable_bytes + - node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + - node_netstat_Tcp_OutSegs + - node_netstat_Tcp_RetransSegs + - node_netstat_TcpExt_TCPSynRetrans + - up + - node:node_cpu_utilisation:avg1m + - kube_node_labels + - 'node_namespace_pod:kube_pod_info:' + - container_memory_usage_bytes + - node_memory_MemTotal_bytes + - node:node_memory_bytes_total:sum + - node:node_net_utilisation:sum_irate + - node_network_receive_bytes + - node_network_transmit_bytes + - node_disk_bytes_read + - node_disk_bytes_written + - node:node_disk_utilisation:avg_irate + - kube_pod_status_ready + - kube_pod_status_phase + - node_filesystem_size + - node_filesystem_avail + - kube_pod_container_status_restarts_total + - openshift_clusterresourcequota_usage + - openshift_clusterresourcequota_labels + - namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate + - kube_namespace_labels + - container_memory_rss + - container_network_receive_bytes_total + - container_network_transmit_bytes_total + - container_network_receive_packets_total + - container_network_transmit_packets_total + - container_network_receive_packets_dropped_total + - container_network_transmit_packets_dropped_total + - container_cpu_usage_seconds_total + matches: + - __name__="workqueue_queue_duration_seconds_bucket",job="apiserver" + - __name__="workqueue_adds_total",job="apiserver" + - __name__="workqueue_depth",job="apiserver" + - __name__="go_goroutines",job="apiserver" + - __name__="process_cpu_seconds_total",job="apiserver" + - __name__="process_resident_memory_bytes",job="apiserver" + - __name__="container_memory_cache",container!="" + - __name__="container_memory_rss",container!="" + - __name__="container_memory_swap",container!="" + - __name__="container_memory_working_set_bytes",container_name!="" + renames: + mixin_pod_workload: namespace_workload_pod:kube_pod_owner:relabel + namespace:kube_pod_container_resource_requests_cpu_cores:sum: namespace_cpu:kube_pod_container_resource_requests:sum + 
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + etcd_mvcc_db_total_size_in_bytes: etcd_debugging_mvcc_db_total_size_in_bytes + recording_rules: + - record: apiserver_request_duration_seconds:histogram_quantile_99 + expr: (histogram_quantile(0.99,sum(rate(apiserver_request_latencies_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (le)))/1000000 + - record: apiserver_request_duration_seconds:histogram_quantile_99:instance + expr: (histogram_quantile(0.99, sum(rate(apiserver_request_latencies_bucket{job=\"apiserver\", verb!=\"WATCH\"}[5m])) by (le, verb, instance)))/1000000 + - record: sum:apiserver_request_total:1h + expr: sum(rate(apiserver_request_count{job=\"apiserver\"}[1h])) by(code, instance) + - record: sum:apiserver_request_total:5m + expr: sum(rate(apiserver_request_count{job=\"apiserver\"}[5m])) by(code, instance) + - record: rpc_rate:grpc_server_handled_total:sum_rate + expr: sum(rate(grpc_server_handled_total{job=\"etcd\",grpc_type=\"unary\",grpc_code!=\"OK\"}[5m])) + - record: active_streams_watch:grpc_server_handled_total:sum + expr: sum(grpc_server_started_total{job=\"etcd\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"etcd\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) + - record: active_streams_lease:grpc_server_handled_total:sum + expr: sum(grpc_server_started_total{job=\"etcd\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"etcd\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) + - record: cluster:kube_pod_container_resource_requests:cpu:sum + expr: sum(sum(sum(kube_pod_container_resource_requests_cpu_cores) by (pod,namespace,container) * on(pod,namespace) group_left(phase) max(kube_pod_status_phase{phase=~\"Running|Pending|Unknown\"} >0) by (pod,namespace,phase)) by (pod,namespace,phase)) + - record: cluster:kube_pod_container_resource_requests:memory:sum + expr: sum(sum(sum(kube_pod_container_resource_requests_memory_bytes) by (pod,namespace,container) * on(pod,namespace) group_left(phase) max(kube_pod_status_phase{phase=~\"Running|Pending|Unknown\"} >0) by (pod,namespace,phase)) by (pod,namespace,phase)) + - record: sli:apiserver_request_duration_seconds:trend:1m + expr: sum(increase(apiserver_request_latencies_bucket{job=\"apiserver\",service=\"kubernetes\",le=\"1\",verb=~\"POST|PUT|DELETE|PATCH\"}[1m])) / sum(increase(apiserver_request_latencies_count{job=\"apiserver\",service=\"kubernetes\",verb=~\"POST|PUT|DELETE|PATCH\"}[1m])) + - record: :node_memory_MemAvailable_bytes:sum + expr: sum(node_memory_MemAvailable_bytes{job=\"node-exporter\"}or(node_memory_Buffers_bytes{job=\"node-exporter\"} + node_memory_Cached_bytes{job=\"node-exporter\"} + node_memory_MemFree_bytes{job=\"node-exporter\"} + node_memory_Slab_bytes{job=\"node-exporter\"})) + - record: instance:node_network_receive_bytes_excluding_lo:rate1m + expr: sum(rate(node_network_receive_bytes_total{job=\"node-exporter\", device!=\"lo\"}[1m])) without(device) + - record: instance:node_network_transmit_bytes_excluding_lo:rate1m + expr: sum(rate(node_network_transmit_bytes_total{job=\"node-exporter\", device!=\"lo\"}[1m])) without(device) + - record: instance:node_network_receive_drop_excluding_lo:rate1m + expr: sum(rate(node_network_receive_drop_total{job=\"node-exporter\", device!=\"lo\"}[1m])) without(device) + - record: 
instance:node_network_transmit_drop_excluding_lo:rate1m + expr: sum(rate(node_network_transmit_drop_total{job=\"node-exporter\", device!=\"lo\"}[1m])) without(device) diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml index 5ad8a2dfb..f6f87b697 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview-ocp311.yaml @@ -293,226 +293,6 @@ data: ], "type": "table" }, - { - "datasource": null, - "description": "Leader election changes per cluster over the time range selected for dashboard.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": null, - "filterable": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "cluster" - }, - "properties": [ - { - "id": "displayName", - "value": "Cluster" - }, - { - "id": "links", - "value": [ - { - "title": "Drill down to cluster", - "url": "/d/N8BxQ2jMz/kubernetes-etcd-cluster?var-cluster=${__data.fields.cluster}" - } - ] - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #A" - }, - "properties": [ - { - "id": "displayName", - "value": "Leader Election Changes" - }, - { - "id": "custom.align", - "value": "left" - }, - { - "id": "thresholds", - "value": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "red", - "value": 2 - } - ] - } - }, - { - "id": "custom.displayMode", - "value": "color-text" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #B" - }, - "properties": [ - { - "id": "displayName", - "value": "DB Size" - }, - { - "id": "unit", - "value": "bytes" - }, - { - "id": "custom.align", - "value": "left" - }, - { - "id": "decimals", - "value": 2 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Value #C" - }, - "properties": [ - { - "id": "displayName", - "value": "Has a Leader" - }, - { - "id": "mappings", - "value": [ - { - "from": "", - "id": 1, - "text": "Yes", - "to": "", - "type": 1, - "value": "1" - }, - { - "from": "", - "id": 2, - "text": "No", - "to": "", - "type": 1, - "value": "0" - } - ] - }, - { - "id": "custom.align", - "value": "left" - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 150, - "interval": "1m", - "options": { - "frameIndex": 2, - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "7.3.10", - "targets": [ - { - "expr": "sum(changes(etcd_server_leader_changes_seen_total{clusterType=\"ocp3\",job=\"etcd\"}[$__range])) by (cluster)", - "format": "table", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - }, - { - "expr": "max(etcd_debugging_mvcc_db_total_size_in_bytes{clusterType=\"ocp3\",job=\"etcd\"}) by (cluster)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "B" - }, - { - "expr": "max(etcd_server_has_leader{clusterType=\"ocp3\",job=\"etcd\"}) by (cluster)", - "format": "table", - "hide": false, - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "C" - } - ], - "title": "etcd", - "transformations": [ - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ 
- "cluster", - "Value #B", - "Value #A", - "Value #C" - ] - } - } - }, - { - "id": "merge", - "options": {} - }, - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": { - "Value #A": 2, - "Value #B": 3, - "Value #C": 1, - "cluster": 0 - }, - "renameByName": {} - } - } - ], - "type": "table" - }, { "collapsed": false, "datasource": "$datasource", @@ -683,7 +463,7 @@ data: { "targetBlank": false, "title": "Drill down to cluster", - "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + "url": "/d/7Rm6ZjqGk/acm-resource-optimization-cluster-ocp-3-11?var-cluster=${__data.fields.cluster}" } ] }, @@ -713,7 +493,7 @@ data: "pluginVersion": "7.3.10", "targets": [ { - "expr": "(sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)) - (1 - avg(rate(node_cpu_seconds_total{clusterType=\"ocp3\",mode=\"idle\"}[$__rate_interval])) by (cluster))", + "expr": "(sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_cpu_cores{clusterType=\"ocp3\"}) by (cluster)) - (1 - avg(rate(node_cpu_seconds_total{clusterType=\"ocp3\",mode=\"idle\"}[$__rate_interval])) by (cluster))", "format": "table", "hide": false, "instant": true, @@ -723,7 +503,7 @@ data: "refId": "C" }, { - "expr": "sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)", + "expr": "sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_cpu_cores{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "hide": false, "instant": true, @@ -925,7 +705,7 @@ data: { "targetBlank": false, "title": "Drill down to cluster", - "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + "url": "/d/7Rm6ZjqGk/acm-resource-optimization-cluster-ocp-3-11?var-cluster=${__data.fields.cluster}" } ] }, @@ -955,7 +735,7 @@ data: "pluginVersion": "7.3.10", "targets": [ { - "expr": "(sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)) - (1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster))", + "expr": "(sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster)) - (1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster))", "format": "table", "hide": false, "instant": true, @@ -965,7 +745,7 @@ data: "refId": "C" }, { - "expr": "sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "expr": "sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "hide": false, "instant": true, @@ -975,7 +755,7 @@ data: "refId": "A" }, { - "expr": "1 
- sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "hide": false, "instant": true, @@ -1161,7 +941,7 @@ data: { "targetBlank": false, "title": "Drill down to cluster", - "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + "url": "/d/7Rm6ZjqGk/acm-resource-optimization-cluster-ocp-3-11?var-cluster=${__data.fields.cluster}" } ] }, @@ -1199,7 +979,7 @@ data: "refId": "F" }, { - "expr": "sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)", + "expr": "sum(kube_node_status_allocatable_cpu_cores{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "instant": true, "interval": "", @@ -1207,7 +987,7 @@ data: "refId": "B" }, { - "expr": "sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"cpu\"}) by (cluster)", + "expr": "sum(cluster:kube_pod_container_resource_requests:cpu:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_cpu_cores{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "hide": false, "instant": true, @@ -1495,7 +1275,7 @@ data: { "targetBlank": false, "title": "Drill down to cluster", - "url": "/d/8Qvi3edMz/acm-resource-optimization-cluster?var-cluster=${__data.fields.cluster}" + "url": "/d/7Rm6ZjqGk/acm-resource-optimization-cluster-ocp-3-11?var-cluster=${__data.fields.cluster}" } ] }, @@ -1534,7 +1314,7 @@ data: "refId": "F" }, { - "expr": "sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "expr": "sum(cluster:kube_pod_container_resource_requests:memory:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "hide": false, "instant": true, @@ -1544,7 +1324,7 @@ data: "refId": "A" }, { - "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)", + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster)", "format": "table", "hide": false, "instant": true, @@ -1655,7 +1435,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "topk(5, (1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable{clusterType=\"ocp3\",resource=\"memory\"}) by (cluster)))", + "expr": "topk(5, (1 - sum(:node_memory_MemAvailable_bytes:sum{clusterType=\"ocp3\"}) by (cluster) / sum(kube_node_status_allocatable_memory_bytes{clusterType=\"ocp3\"}) by (cluster)))", "format": "time_series", "instant": false, "interval": "", @@ -1981,6 +1761,5 @@ kind: ConfigMap metadata: name: grafana-dashboard-acm-clusters-overview-ocp311 namespace: open-cluster-management-observability - labels: - general-folder: 'true' - + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" diff --git 
a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml index 142854c94..67eb65fde 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-clusters-overview.yaml @@ -250,7 +250,7 @@ data: "targets": [ { "exemplar": true, - "expr": "topk(50, max(apiserver_request_duration_seconds:histogram_quantile_99) by (cluster))\n* on(cluster) group_left(api_up) count_values without() (\"api_up\", (sum(up{job=\"apiserver\"} == 1) by (cluster) / count(up{job=\"apiserver\"}) by (cluster)))", + "expr": "topk(50, max(apiserver_request_duration_seconds:histogram_quantile_99{clusterType!=\"ocp3\"}) by (cluster))\n* on(cluster) group_left(api_up) count_values without() (\"api_up\", (sum(up{job=\"apiserver\",clusterType!=\"ocp3\"} == 1) by (cluster) / count(up{job=\"apiserver\",clusterType!=\"ocp3\"}) by (cluster)))", "format": "table", "instant": true, "interval": "", @@ -259,7 +259,7 @@ data: }, { "exemplar": true, - "expr": "sum by (cluster)(sum:apiserver_request_total:1h{code=~\"5..\"})", + "expr": "sum by (cluster)(sum:apiserver_request_total:1h{code=~\"5..\",clusterType!=\"ocp3\"})", "format": "table", "hide": false, "instant": true, @@ -1691,7 +1691,7 @@ data: "pluginVersion": "7.5.5", "targets": [ { - "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster)\n* on(cluster) group_left(node_transmit) count_values without() (\"node_transmit\", sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))\n* on(cluster) group_left(node_receive_drop) count_values without() (\"node_receive_drop\", sum(instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))\n* on(cluster) group_left(node_transmit_drop) count_values without() (\"node_transmit_drop\", sum(instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))", + "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\",clusterType!=\"ocp3\"}) by (cluster)\n* on(cluster) group_left(node_transmit) count_values without() (\"node_transmit\", sum(instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))\n* on(cluster) group_left(node_receive_drop) count_values without() (\"node_receive_drop\", sum(instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))\n* on(cluster) group_left(node_transmit_drop) count_values without() (\"node_transmit_drop\", sum(instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\"}) by (cluster))", "legendFormat": "", "interval": "", "exemplar": true, diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview-ocp311.yaml new file mode 100644 index 000000000..0437167e4 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview-ocp311.yaml @@ -0,0 +1,1433 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-acm-optimization-overview-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + 
acm-optimization-overview-ocp311.json: | + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 2, + "iteration": 1639517441128, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "title": "CPU", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "#F2495C", + "value": 0.2 + }, + { + "color": "#F2495C" + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 8, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"})) - (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\",cluster=\"$cluster\"}[$__rate_interval])))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overestimation", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 8, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 20, + "x": 4, + "y": 1 + }, + "hiddenSeries": false, + "id": 16, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)) or (sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate) by (namespace))", + "interval": "", + "legendFormat": "{{namespace}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 5 + }, + "id": 6, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Requests Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 9 + }, + "id": 4, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[$__rate_interval]))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "type": "stat" + }, + { + "columns": [], + "datasource": "$datasource", + "description": "", + "fontSize": "100%", + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 26, + "interval": "4m", + "links": [], + "options": { + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Namespace" + } + ] + }, + "pageSize": null, + "pluginVersion": "7.1.3", + "showHeader": true, + "sort": { + "col": 4, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Pods", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": 
"/d/fc12952e0f6a5dd5b23b1729e03ebfc7/kubernetes-compute-resources-namespace-pods-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_0&orgId=1&refresh=5m", + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "/d/fc12952e0f6a5dd5b23b1729e03ebfc7/kubernetes-compute-resources-namespace-pods-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_0&orgId=1&refresh=5m", + "mappingType": 1, + "pattern": "namespace", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Utilization", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Overcommitted Requests %", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Overcommitted Core", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Department", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "label_dept", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "CPU Limits", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Max CPU Utilization", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 4, + "mappingType": 1, + "pattern": "Value #H", + 
"thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count(kube_namespace_labels{label_dept=~\".+\",cluster=\"$cluster\"}) by (namespace,label_dept)", + "format": "table", + "instant": true, + "refId": "F" + }, + { + "expr": "((sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace))-(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace))) or ((sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace))-(sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)))", + "format": "table", + "instant": true, + "refId": "B" + }, + { + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)) or (sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)) or (sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "count(mixin_pod_workload{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "refId": "G" + }, + { + "expr": "max_over_time(sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)[$__range:])", + "format": "table", + "instant": true, + "refId": "H" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage by Namespace", + "transform": "table", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "namespace" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Pods", + "Value #B": "Workloads", + "Value #C": "CPU Usage", + "Value #D": "CPU Requests", + "Value #E": "CPU Requests %", + "Value #F": "CPU Limits", + "Value #G": "CPU Limits %", + "namespace": "Namespace" + } + } + } + ], + "type": "table-old" + }, + { + "collapsed": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 18, + "panels": [], + "title": "Memory", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + 
"match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "#F2495C", + "value": 0.2 + }, + { + "color": "#F2495C" + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 23 + }, + "id": 14, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"})/ sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"}))\n-\n(1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"})/ sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})) or (sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"})/ sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"}))\n-\n(1 - sum(node_memory_MemAvailable_bytes{job=\"node-exporter\",cluster=\"$cluster\"})/ sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"}))", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Overestimation", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 8, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 20, + "x": 4, + "y": 23 + }, + "hiddenSeries": false, + "id": 22, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(container_memory_rss{cluster=\"$cluster\",container!=\"\"}) by (namespace)) or (sum(container_memory_rss{cluster=\"$cluster\",container_name!=\"\"}) by (namespace))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{namespace}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + 
} + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 27 + }, + "id": 12, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Requests Commitment", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 31 + }, + "id": 10, + "interval": "4m", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})) or (1 - sum(node_memory_MemAvailable_bytes{job=\"node-exporter\",cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"}))", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "type": "stat" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 28, + "interval": "4m", + "links": [], + "options": { + "showHeader": true + }, + "pageSize": null, + "pluginVersion": "7.1.3", + "showHeader": true, + "sort": { + "col": 5, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + 
"mappingType": 1, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pods", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "/d/fc12952e0f6a5dd5b23b1729e03ebfc7/kubernetes-compute-resources-namespace-pods-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_0&orgId=1&refresh=5m", + "mappingType": 1, + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Namespace", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "/d/fc12952e0f6a5dd5b23b1729e03ebfc7/kubernetes-compute-resources-namespace-pods-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_0&orgId=1&refresh=5m", + "mappingType": 1, + "pattern": "namespace", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Delta Memory Usage", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Department", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "label_dept", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "MemoryLimits", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Max Memory Usage", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #H", + "thresholds": [], + "type": "number", + "unit": "bytes" + } + ], + "targets": [ + { + "expr": "count(kube_namespace_labels{label_dept=~\".+\",cluster=\"$cluster\"}) by (namespace,label_dept)", + "format": "table", + "instant": true, + "refId": "F" + }, + { + "expr": 
"(sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace))-(sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)) or ((sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace))-(sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace)))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "refId": "B" + }, + { + "expr": "(sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)) or (sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "(sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)) or (sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "count(mixin_pod_workload{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "refId": "G" + }, + { + "expr": "max_over_time(sum(container_memory_rss{cluster=\"$cluster\",container_name!=\"\"}) by (namespace) [$__range:])", + "format": "table", + "instant": true, + "refId": "H" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage by Namespace", + "transform": "table", + "transformations": [ + { + "id": "seriesToColumns", + "options": { + "byField": "namespace" + } + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "Value #A": "Pods", + "Value #B": "Workloads", + "Value #C": "Memory Usage", + "Value #D": "Memory Requests", + "Value #E": "Memory Requests %", + "Value #F": "Memory Limits", + "Value #G": "Memory Limits %", + "namespace": "Namespace" + } + } + } + ], + "type": "table-old" + } + ], + "refresh": "5m", + "schemaVersion": 30, + "style": "dark", + "tags": [ + "ACM" + ], + "templating": { + "list": [ + { + "current": { + "text": "Observatorium", + "value": "Observatorium" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 5, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "ups-jb", + "value": "ups-jb" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": 
"label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "ACM - Resource Optimization / Cluster - OCP 3.11", + "uid": "7Rm6ZjqGk", + "version": 1 + } diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml index 8b97e43e1..7e138d5ac 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-acm-optimization-overview.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -18,13 +24,17 @@ data: "editable": true, "gnetId": null, "graphTooltip": 0, - "id": 2, - "iteration": 1621618422811, + "id": 4, + "iteration": 1646943408368, "links": [], "panels": [ { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -39,21 +49,25 @@ data: { "cacheTimeout": null, "datasource": "$datasource", + "description": "Highlights % differences between CPU requests commitments vs utilization. When this difference is large ( >20%), it means that resources are reserved but unused.", "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -99,10 +113,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "(sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})) - (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\",cluster=\"$cluster\"}[$__rate_interval])))", + "exemplar": true, + "expr": "(sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})) - (1 - avg(rate(node_cpu_seconds_total{mode=\"idle\",cluster=\"$cluster\"}[$__rate_interval])))", "format": "time_series", "instant": true, "interval": "", @@ -114,7 +129,6 @@ data: "timeFrom": null, "timeShift": null, "title": "Overestimation", - "description": "Highlights % differences between CPU requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", "type": "stat" }, { @@ -125,7 +139,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -157,7 +170,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -167,7 +180,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}", "interval": "", "legendFormat": "{{namespace}}", "refId": "A" @@ -223,16 +236,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -275,10 +291,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", "format": "time_series", "instant": true, "interval": "", @@ -301,16 +318,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -353,7 +373,7 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[$__rate_interval]))", @@ -377,6 +397,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -576,10 +597,11 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "exemplar": true, + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -589,7 +611,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}", "format": "table", "instant": true, "intervalFactor": 1, @@ -598,7 +620,8 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + 
"exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -647,6 +670,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -661,21 +688,25 @@ data: { "cacheTimeout": null, "datasource": "$datasource", + "description": "Highlights % differences between Memory requests commitments vs utilization. When this difference is large ( >20%), it means that resources are reserved but unused.", "fieldConfig": { "defaults": { "color": { "mode": "thresholds" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -721,10 +752,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "(sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"})/ sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"}))\n-\n(1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"})/ sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"}))", + "exemplar": true, + "expr": "(sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"})/ sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"}))\n-\n(1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"})/ sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"}))", "format": "time_series", "instant": true, "interval": "", @@ -736,7 +768,6 @@ data: "timeFrom": null, "timeShift": null, "title": "Overestimation", - "description": "Highlights % differences between Memory requests commitments vs utilization. 
When this difference is large ( >20%), it means that resources are reserved but unused.", "type": "stat" }, { @@ -747,7 +778,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -780,7 +810,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -790,7 +820,8 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace)", "format": "time_series", "interval": "", "intervalFactor": 2, @@ -850,16 +881,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -902,10 +936,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", "format": "time_series", "instant": true, "interval": "", @@ -928,16 +963,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -980,7 +1018,7 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", @@ -1007,6 +1045,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -1205,10 +1244,11 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1218,16 +1258,19 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace)", "format": "table", "instant": true, + "interval": "", "intervalFactor": 2, "legendFormat": "", "refId": "C", "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", 
+ "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1276,6 +1319,10 @@ data: { "collapsed": false, "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -1296,6 +1343,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -1462,7 +1510,7 @@ data: "showHeader": true, "sortBy": [] }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", @@ -1536,7 +1584,7 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", "tags": [ "ACM" @@ -1572,7 +1620,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "label_values(cluster)", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -1582,7 +1630,7 @@ data: "name": "cluster", "options": [], "query": { - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "Observatorium-cluster-Variable-Query" }, "refresh": 2, @@ -1590,7 +1638,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1615,7 +1662,7 @@ data: "timezone": "browser", "title": "ACM - Resource Optimization / Cluster", "uid": "8Qvi3edMz", - "version": 3 + "version": 4 } kind: ConfigMap metadata: diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml index f2eb93d78..5f589a89c 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-cluster-rsrc-use.yaml @@ -996,7 +996,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "label_values(cluster)", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "hide": 0, "includeAll": false, "label": null, @@ -1014,7 +1014,7 @@ data: "value": "" } ], - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refresh": 2, "regex": "", "skipUrlSync": false, diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-capacity-planning-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-capacity-planning-ocp311.yaml new file mode 100644 index 000000000..51e934fa3 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-capacity-planning-ocp311.yaml @@ -0,0 +1,1088 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-k8s-capacity-management-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + k8s-capacity-management-ocp311.json: | + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + 
"graphTooltip": 0, + "id": 4, + "iteration": 1639517557117, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 9, + "panels": [], + "title": "$cluster Cluster Capacity", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.6 + }, + { + "color": "#d44a3a", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 7, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (openshift_clusterresourcequota_labels{cluster=\"$cluster\"} * on (name) group_right () openshift_clusterresourcequota_usage{resource=\"requests.memory\",type=\"hard\"}) by (cluster))/(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_right () kube_node_status_allocatable_memory_bytes) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Quota / Allocatable in Cluster: $cluster", + "type": "gauge" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.6 + }, + { + "color": "#d44a3a", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 19, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum (kube_pod_container_resource_requests_memory_bytes) by (node)) by (cluster))/(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () kube_node_status_allocatable_memory_bytes) by (cluster))", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "title": "Pod requests / Allocatable in Cluster: $cluster", + "type": "gauge" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.6 + }, + { + "color": "#d44a3a", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 20, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum(kube_pod_info * on (pod) group_left() label_replace(sum(container_memory_usage_bytes{container_name=~\".+\", container_name!=\"POD\"}) by (pod_name),\"pod\",\"$1\",\"pod_name\", \"(.+)\")) by (node)) by (cluster)/(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () kube_node_status_allocatable_memory_bytes) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "title": "Current Memory Usage / Allocatable in Cluster: $cluster", + "type": "gauge" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "description": "What has been promised", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 8, + "x": 0, + "y": 6 + }, + "id": 4, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (openshift_clusterresourcequota_labels{cluster=\"$cluster\"} * on (name) group_right () openshift_clusterresourcequota_usage{cluster=\"$cluster\",resource=\"requests.memory\",type=\"hard\"}) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Sum of Quotas", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "description": "What developers estimate they will need", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 8, + "x": 8, + "y": 6 + }, + "id": 5, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum 
(kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum (kube_pod_container_resource_requests_memory_bytes) by (node)) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "title": "Sum of Pod Requests", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 8, + "x": 16, + "y": 6 + }, + "id": 6, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum(kube_pod_info * on (pod) group_left() label_replace(sum(container_memory_usage_bytes{container_name=~\".+\", container_name!=\"POD\"}) by (pod_name),\"pod\",\"$1\",\"pod_name\", \"(.+)\")) by (node)) by (cluster)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "C" + } + ], + "title": "Current Memory Utilization", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 12, + "panels": [], + "title": "$cluster Cluster CPU Status", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.6 + }, + { + "color": "#d44a3a", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 9 + }, + "id": 21, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (openshift_clusterresourcequota_labels{cluster=\"$cluster\"} * on (name) group_right () openshift_clusterresourcequota_usage{resource=\"requests.cpu\",type=\"hard\"}) by (cluster))/(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_right () kube_node_status_allocatable_cpu_cores) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Quota/ Allocatable in Cluster: $cluster", + "type": "gauge" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + 
"text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.6 + }, + { + "color": "#d44a3a", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 9 + }, + "id": 22, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum (kube_pod_container_resource_requests_cpu_cores) by (node)) by (cluster))/(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () kube_node_status_allocatable_cpu_cores) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "B" + } + ], + "title": "Pod requests / Allocatable in Cluster: $cluster", + "type": "gauge" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "description": "OS and Container CPU usage", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 0.6 + }, + { + "color": "#d44a3a", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 9 + }, + "id": 23, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum(kube_pod_info * on (pod) group_left() label_replace(sum(rate (container_cpu_usage_seconds_total{container_name=~\".+\", container_name!=\"POD\"} [$__rate_interval])) by (pod_name),\"pod\",\"$1\",\"pod_name\", \"(.+)\")) by (node)) by (cluster) / (sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () kube_node_status_allocatable_cpu_cores) by (cluster)) / (sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () kube_node_status_allocatable_cpu_cores) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Current CPU Usage in Cluster: $cluster", + "type": "gauge" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "description": "What has been promised", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 8, + "x": 0, + "y": 14 + }, + "id": 13, + 
"interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (openshift_clusterresourcequota_labels{cluster=\"$cluster\"} * on (name) group_right () openshift_clusterresourcequota_usage{cluster=\"$cluster\",resource=\"requests.cpu\",type=\"hard\"}) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Sum of Quotas", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "description": "What developers estimate they will need", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 8, + "x": 8, + "y": 14 + }, + "id": 14, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "(sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum (kube_pod_container_resource_requests_cpu_cores) by (node)) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Sum of Pod Requests", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 8, + "x": 16, + "y": 14 + }, + "id": 15, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () sum(kube_pod_info * on (pod) group_left() label_replace(sum(rate (container_cpu_usage_seconds_total{container_name=~\".+\", container_name!=\"POD\"} [$__rate_interval])) by (pod_name),\"pod\",\"$1\",\"pod_name\", \"(.+)\")) by (node)) by (cluster) / (sum (kube_node_labels{cluster=\"$cluster\"} * on (node) group_left () kube_node_status_allocatable_cpu_cores) by (cluster))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "title": "Current CPU Utilization", + "type": "stat" + } + ], + "refresh": "5m", + "schemaVersion": 30, + "style": "dark", + 
"tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Observatorium", + "value": "Observatorium" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": "", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "ups-jb", + "value": "ups-jb" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Capacity Management - OCP 3.11", + "uid": "UiQ7nV7Zz-3", + "version": 1 + } diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml index 560a22525..cb5364652 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-cluster.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -19,13 +25,17 @@ data: "editable": true, "gnetId": 12114, "graphTooltip": 0, - "id": 3, - "iteration": 1621617333424, + "id": 9, + "iteration": 1646946627802, "links": [], "panels": [ { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -47,16 +57,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -103,7 +116,7 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[$__rate_interval]))", @@ -127,16 +140,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -183,10 +199,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + 
"pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", "format": "time_series", "instant": true, "interval": "", @@ -209,16 +226,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -265,10 +285,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_limits:sum{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"cpu\"})", "format": "time_series", "instant": true, "interval": "", @@ -291,16 +312,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -347,7 +371,7 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", @@ -373,16 +397,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -429,10 +456,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", "format": "time_series", "instant": true, "interval": "", @@ -455,16 +483,19 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "mappings": [ { - "id": 0, - "op": "=", - "text": "N/A", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -511,10 +542,11 @@ data: "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { - "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) / 
sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_limits:sum{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{cluster=\"$cluster\", resource=\"memory\"})", "format": "time_series", "instant": true, "interval": "", @@ -531,6 +563,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -551,7 +587,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -583,7 +618,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -593,7 +628,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{namespace}}", @@ -646,6 +681,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -667,6 +706,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -924,7 +964,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", @@ -938,7 +978,7 @@ data: "step": 10 }, { - "expr": "count(avg(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "expr": "namespace_workload_pod:kube_pod_owner:relabel:avg{cluster=\"$cluster\"}", "format": "table", "hide": false, "instant": true, @@ -949,7 +989,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "expr": "node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}", "format": "table", "instant": true, "intervalFactor": 2, @@ -958,7 +998,8 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -968,7 +1009,8 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "exemplar": true, + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -978,7 +1020,8 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_limits:sum{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "interval": 
"", @@ -988,7 +1031,8 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", + "exemplar": true, + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits:sum{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1031,6 +1075,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -1051,7 +1099,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -1083,7 +1130,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -1093,8 +1140,10 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace)", "format": "time_series", + "interval": "", "intervalFactor": 2, "legendFormat": "{{namespace}}", "legendLink": null, @@ -1146,6 +1195,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -1167,6 +1220,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -1428,7 +1482,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", @@ -1442,7 +1496,7 @@ data: "step": 10 }, { - "expr": "count(avg(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "expr": "namespace_workload_pod:kube_pod_owner:relabel:avg{cluster=\"$cluster\"}", "format": "table", "hide": false, "instant": true, @@ -1453,16 +1507,19 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace)", "format": "table", "instant": true, + "interval": "", "intervalFactor": 2, "legendFormat": "", "refId": "C", "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1472,7 +1529,8 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests:sum{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1482,7 +1540,8 @@ data: "step": 10 }, { - "expr": 
"sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "exemplar": true, + "expr": "sum(kube_pod_container_resource_limits:sum{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1492,7 +1551,8 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", + "exemplar": true, + "expr": "sum(container_memory_rss:sum{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits:sum{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "interval": "", @@ -1535,6 +1595,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -1556,6 +1620,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -1707,6 +1772,18 @@ data: "value": 300 } ] + }, + { + "matcher": { + "id": "byName", + "options": "Current Bandwidth Transmitted" + }, + "properties": [ + { + "id": "custom.width", + "value": null + } + ] } ] }, @@ -1722,7 +1799,7 @@ data: "showHeader": true, "sortBy": [] }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster=\"$cluster\"}) by (instance)", @@ -1802,12 +1879,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "color": {}, - "custom": {}, - "thresholds": { - "mode": "absolute", - "steps": [] - }, "unit": "Bps" }, "overrides": [] @@ -1845,7 +1916,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -1917,12 +1988,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "color": {}, - "custom": {}, - "thresholds": { - "mode": "absolute", - "steps": [] - }, "unit": "Bps" }, "overrides": [] @@ -1960,7 +2025,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -2025,7 +2090,7 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", "tags": [ "kubernetes-mixin" @@ -2078,7 +2143,6 @@ data: "skipUrlSync": false, "sort": 2, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -2114,7 +2178,7 @@ data: "timezone": "browser", "title": "Kubernetes / Compute Resources / Cluster", "uid": "efa86fd1d0c121a26444b636a3f509a8", - "version": 4 + "version": 5 } kind: ConfigMap metadata: diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods-ocp311.yaml new file mode 100644 index 000000000..b6fd22e05 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods-ocp311.yaml @@ -0,0 +1,2407 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-k8s-compute-resources-namespace-pods-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + 
k8s-compute-resources-namespace-pods-ocp311.json: | + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12117, + "graphTooltip": 0, + "id": 21, + "iteration": 1639517642230, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 16, + "panels": [], + "repeat": null, + "title": "Headlines", + "type": "row" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 3, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 1, + "interval": "", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from requests)", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 3, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 2, + "interval": "", + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from limits)", + 
"type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 3, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilization (from requests)", + "type": "stat" + }, + { + "cacheTimeout": null, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 3, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#299c46", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 70 + }, + { + "color": "#d44a3a", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 4, + "interval": null, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation (from limits)", + "type": "stat" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 17, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 5 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + 
"percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)) or (sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod_name))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod_name}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 18, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 13 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 5, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "$$hashKey": "object:98", + "alias": "Time", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "$$hashKey": "object:99", + "alias": "CPU Usage", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": 
"short" + }, + { + "$$hashKey": "object:100", + "alias": "CPU Requests", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "$$hashKey": "object:101", + "alias": "CPU Requests %", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "$$hashKey": "object:102", + "alias": "CPU Limits", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "$$hashKey": "object:103", + "alias": "CPU Limits %", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "$$hashKey": "object:104", + "alias": "Pod", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "/d/0c2f0c6c0e0c0cccf43e658dfeda09a5/kubernetes-compute-resources-pod-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell&orgId=1&refresh=5m", + "pattern": "pod", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "$$hashKey": "object:105", + "alias": "Max CPU Utilization", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 4, + "mappingType": 1, + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "$$hashKey": "object:106", + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)) or (sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)) or 
(sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "max_over_time(sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)[$__range:])", + "format": "table", + "instant": true, + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table-old", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 19, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 7, + "interval": "4m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"}) by (pod_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod_name}}", + 
"legendLink": null, + "refId": "A", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 20, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 8, + "interval": "4m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "$$hashKey": "object:258", + "alias": "Time", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "$$hashKey": "object:259", + "alias": "Memory Usage", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:260", + "alias": "Memory Requests", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:261", + "alias": "Memory Requests %", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "$$hashKey": "object:262", + "alias": "Memory Limits", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill 
down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:263", + "alias": "Memory Limits %", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "$$hashKey": "object:264", + "alias": "Memory Usage (RSS)", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:265", + "alias": "Memory Usage (Cache)", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:266", + "alias": "Memory Usage (Swap)", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:267", + "alias": "Pod", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "/d/0c2f0c6c0e0c0cccf43e658dfeda09a5/kubernetes-compute-resources-pod-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell&orgId=1&refresh=5m", + "pattern": "pod", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "$$hashKey": "object:268", + "alias": "Max Memory Usage", + "align": "auto", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #I", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "$$hashKey": "object:269", + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": 
"sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "H", + "step": 10 + }, + { + "expr": "max_over_time(sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)[$__range:])", + "format": "table", + "instant": true, + "refId": "I" + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table-old", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 38 + }, + "id": 21, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 39 + }, + "id": 9, + "interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "align": "auto", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Current Receive Bandwidth", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": 
"YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Transmit Bandwidth", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [], + "type": "number", + "unit": "pps" + }, + { + "alias": "Pod", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "/d/0c2f0c6c0e0c0cccf43e658dfeda09a5/kubernetes-compute-resources-pod-ocp-3-11?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell&orgId=1&refresh=5m", + "pattern": "pod", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "align": "auto", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": 
"sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Current Network Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table-old", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 22, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 49 + }, + "hiddenSeries": false, + "id": 10, + "interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 56 + }, + "id": 23, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 57 + }, + "hiddenSeries": false, + "id": 11, + 
"interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 64 + }, + "id": 24, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 65 + }, + "hiddenSeries": false, + "id": 12, + "interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 72 + }, + "id": 25, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + 
"fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 73 + }, + "hiddenSeries": false, + "id": 13, + "interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 80 + }, + "id": 26, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 81 + }, + "hiddenSeries": false, + "id": 14, + "interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 
24, + "x": 0, + "y": 88 + }, + "id": 27, + "panels": [], + "repeat": null, + "title": "Network", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 89 + }, + "hiddenSeries": false, + "id": 15, + "interval": "2m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5m", + "schemaVersion": 30, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Observatorium", + "value": "Observatorium" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "ups-jb", + "value": "ups-jb" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"}, cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(kube_pod_info{clusterType=\"ocp3\"}, cluster)", + "refId": "Observatorium-cluster-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "open-cluster-management-agent-addon", + "value": "open-cluster-management-agent-addon" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refId": "Observatorium-namespace-Variable-Query" + }, + "refresh": 1, + 
"regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Namespace (Pods) - OCP 3.11", + "uid": "fc12952e0f6a5dd5b23b1729e03ebfc7", + "version": 1 + } diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml index 3237fb2d6..d84d8862e 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-pods.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -19,8 +25,8 @@ data: "editable": true, "gnetId": 12117, "graphTooltip": 0, - "id": 6, - "iteration": 1621393773054, + "id": 10, + "iteration": 1647373958245, "links": [], "panels": [ { @@ -47,17 +53,20 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "decimals": 2, "mappings": [ { - "id": 0, - "op": "=", - "text": "No Data", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -95,14 +104,16 @@ data: "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": ["mean"], + "calcs": [ + "mean" + ], "fields": "", "values": false }, "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"})", @@ -128,17 +139,20 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "decimals": 2, "mappings": [ { - "id": 0, - "op": "=", - "text": "No Data", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -176,14 +190,16 @@ data: "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": ["mean"], + "calcs": [ + "mean" + ], "fields": "", "values": false }, "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"})", @@ -209,17 +225,20 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "decimals": 2, "mappings": [ { - "id": 0, - "op": "=", - "text": "No Data", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "No Data" + } + 
}, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -257,14 +276,16 @@ data: "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": ["mean"], + "calcs": [ + "mean" + ], "fields": "", "values": false }, "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) / sum(kube_pod_container_resource_requests{namespace=\"$namespace\", resource=\"memory\"})", @@ -290,17 +311,20 @@ data: "fixedColor": "rgb(255, 255, 255)", "mode": "fixed" }, - "custom": {}, "decimals": 2, "mappings": [ { - "id": 0, - "op": "=", - "text": "No Data", - "type": 1, - "value": "null" + "options": { + "match": "null", + "result": { + "text": "No Data" + } + }, + "type": "special" } ], + "max": 1, + "min": 0, "thresholds": { "mode": "absolute", "steps": [ @@ -338,14 +362,16 @@ data: "justifyMode": "auto", "orientation": "horizontal", "reduceOptions": { - "calcs": ["mean"], + "calcs": [ + "mean" + ], "fields": "", "values": false }, "text": {}, "textMode": "auto" }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) / sum(kube_pod_container_resource_limits{namespace=\"$namespace\", resource=\"memory\"})", @@ -385,7 +411,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -417,7 +442,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -540,6 +565,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -733,7 +759,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", @@ -834,7 +860,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -867,7 +892,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -990,6 +1015,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -1256,7 +1282,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", @@ -1368,9 +1394,11 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", - "tags": ["kubernetes-mixin"], + "tags": [ + "kubernetes-mixin" + ], "templating": { "list": [ { @@ -1389,7 +1417,7 @@ data: "options": [], "query": "prometheus", "refresh": 2, - "regex": "", + "regex": "Observatorium-Dynamic", "skipUrlSync": false, "type": "datasource" }, @@ -1401,7 +1429,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -1411,7 +1439,7 @@ data: "name": "cluster", "options": [], "query": { - 
"query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "Observatorium-cluster-Variable-Query" }, "refresh": 2, @@ -1419,7 +1447,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1450,7 +1477,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1462,13 +1488,31 @@ data: "to": "now" }, "timepicker": { - "refresh_intervals": ["1m", "5m", "15m", "30m", "1h", "2h", "1d"], - "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] }, "timezone": "browser", "title": "Kubernetes / Compute Resources / Namespace (Pods)", "uid": "85a562078cdf77779eaa1add43ccec1e", - "version": 2 + "version": 3 } kind: ConfigMap metadata: diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml index e69e0a4d7..635cef524 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-namespace-workloads.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -19,13 +25,17 @@ data: "editable": true, "gnetId": 12118, "graphTooltip": 0, - "id": 5, - "iteration": 1621618561596, + "id": 12, + "iteration": 1647365264218, "links": [], "panels": [ { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -46,7 +56,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -78,7 +87,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -186,6 +195,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -204,6 +217,7 @@ data: "defaults": { "custom": { "align": "left", + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -320,7 +334,7 @@ data: } ] }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "count(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}) by (workload, workload_type)", @@ -424,6 +438,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -444,7 +462,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -476,7 +493,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -581,6 +598,10 @@ data: { "collapsed": false, "datasource": "$datasource", 
+ "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -599,6 +620,7 @@ data: "defaults": { "custom": { "align": "left", + "displayMode": "auto", "filterable": false }, "decimals": 0, @@ -757,7 +779,7 @@ data: } ] }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "count(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}) by (workload, workload_type)", @@ -854,7 +876,7 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", "tags": [ "kubernetes-mixin" @@ -878,7 +900,7 @@ data: "query": "prometheus", "queryValue": "", "refresh": 2, - "regex": "", + "regex": "Observatorium-Dynamic", "skipUrlSync": false, "type": "datasource" }, @@ -911,7 +933,6 @@ data: "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -924,7 +945,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "label_values(cluster)", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -934,7 +955,7 @@ data: "name": "cluster", "options": [], "query": { - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -942,7 +963,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -973,7 +993,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1009,7 +1028,7 @@ data: "timezone": "browser", "title": "Kubernetes / Compute Resources / Namespace (Workloads)", "uid": "a87fb0d919ec0ea5f6543124e16c42a5", - "version": 3 + "version": 4 } kind: ConfigMap metadata: diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml index 968b8fd1c..54a08cb15 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-node-pods.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -19,13 +25,17 @@ data: "editable": true, "gnetId": 12119, "graphTooltip": 0, - "id": 7, - "iteration": 1619465861537, + "id": 13, + "iteration": 1647365265319, "links": [], "panels": [ { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -46,7 +56,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -79,7 +88,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -142,6 +151,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -163,6 +176,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ 
-345,7 +359,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=\"$node\"}) by (pod)", @@ -428,6 +442,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -448,7 +466,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -480,7 +497,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -543,6 +560,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -564,6 +585,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -822,7 +844,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", node=\"$node\",container!=\"\",job=\"kubelet\", metrics_path=\"\/metrics\/cadvisor\", image!=\"\"}) by (pod)", @@ -934,9 +956,11 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", - "tags": ["kubernetes-mixin"], + "tags": [ + "kubernetes-mixin" + ], "templating": { "list": [ { @@ -955,7 +979,7 @@ data: "options": [], "query": "prometheus", "refresh": 2, - "regex": "", + "regex": "Observatorium-Dynamic", "skipUrlSync": false, "type": "datasource" }, @@ -967,7 +991,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -977,7 +1001,7 @@ data: "name": "cluster", "options": [], "query": { - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "Observatorium-cluster-Variable-Query" }, "refresh": 2, @@ -985,7 +1009,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1016,7 +1039,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1028,13 +1050,31 @@ data: "to": "now" }, "timepicker": { - "refresh_intervals": ["1m", "5m", "15m", "30m", "1h", "2h", "1d"], - "time_options": ["5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d"] + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] }, "timezone": "browser", "title": "Kubernetes / Compute Resources / Node (Pods)", "uid": "200ac8fdbfbb74b39aff88118e4d1c2c", - "version": 3 + "version": 4 } kind: ConfigMap metadata: diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod-ocp311.yaml new file mode 100644 index 000000000..c9d788500 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod-ocp311.yaml @@ -0,0 +1,1260 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: 
grafana-dashboard-k8s-compute-resources-pod-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + k8s-compute-resources-pod-ocp311.json: | + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "prometheus operator ", + "editable": true, + "gnetId": 12120, + "graphTooltip": 0, + "id": 22, + "iteration": 1639517789208, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 12, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "requests", + "color": "#F2495C", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + }, + { + "alias": "limits", + "color": "#FF9830", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "(sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", cluster=\"$cluster\"}) by (container)) or\n(sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"POD\",pod_name=\"$pod\"}) by (container_name))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{container}}", + "legendLink": null, + "refId": "A", + "step": 10 + }, + { + "exemplar": true, + "expr": "(sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}))\n or (sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}))\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "exemplar": true, + "expr": "(sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"})) or (sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}))\n", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": 
null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 13, + "panels": [], + "repeat": null, + "title": "CPU Throttling", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 2, + "interval": "4m", + "legend": { + "avg": false, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(increase(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", cluster=\"$cluster\"}[$__rate_interval])) by (container_name) /sum(increase(container_cpu_cfs_periods_total{namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", cluster=\"$cluster\"}[$__rate_interval])) by (container_name)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{container}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + { + "$$hashKey": "object:126", + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.25, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Throttling", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:138", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "$$hashKey": "object:139", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 14, + "panels": [], + "repeat": null, + "title": "CPU Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": 
"short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Usage" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "CPU Requests" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "container_name" + }, + "properties": [ + { + "id": "displayName", + "value": "Container" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "custom.width", + "value": 400 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "displayName", + "value": "CPU Requests %" + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 3, + "links": [], + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (container_name)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container_name)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "exemplar": true, + "expr": "(sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"POD\", pod_name=\"$pod\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (container_name)/ sum(label_replace(kube_pod_container_resource_requests_cpu_cores{namespace=\"namespace\",pod=\"$pod\",cluster=\"$cluster\"}, \"container_name\", \"$1\", \"container\", \"(.*)\")) by (container_name))", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "container_name", + "Value #A", + "Value #B" + ] + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 15, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 10, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 27 + 
}, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.1.3", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:345", + "alias": "requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "$$hashKey": "object:346", + "alias": "limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"POD\",container_name!=\"\",pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{container}}", + "refId": "A" + }, + { + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "refId": "B", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "limits", + "legendLink": null, + "refId": "C", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:389", + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "$$hashKey": "object:390", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "$datasource", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 16, + "panels": [], + "repeat": null, + "title": "Memory Quota", + "type": "row" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": null, + "displayMode": "auto", + "filterable": false + }, + "decimals": 2, + "displayName": "", + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Value #A" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + 
"value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #B" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #C" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Requests %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #D" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #E" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Limits %" + }, + { + "id": "unit", + "value": "percentunit" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #F" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (RSS)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #G" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Cache)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Value #H" + }, + "properties": [ + { + "id": "displayName", + "value": "Memory Usage (Swap)" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "container" + }, + "properties": [ + { + "id": "displayName", + "value": "Container" + }, + { + "id": "unit", + "value": "short" + }, + { + "id": "decimals", + "value": 2 + }, + { + "id": "custom.align", + "value": "left" + }, + { + "id": "custom.width", + "value": 400 + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 5, + "links": [], + "options": { + "showHeader": true + }, + "pluginVersion": "8.1.3", + "targets": [ + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\",pod_name=\"$pod\", container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\",pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\", pod_name=\"$pod\", 
container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\",pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\", pod_name=\"$pod\", container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)/ sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\", pod_name=\"$pod\",container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\", pod_name=\"$pod\",container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "exemplar": true, + "expr": "sum(label_replace(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\", pod_name=\"$pod\",container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "transformations": [ + { + "id": "merge", + "options": { + "reducers": [] + } + }, + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "container", + "Value #A", + "Value #B", + "Value #C", + "Value #D", + "Value #E", + "Value #F", + "Value #G", + "Value #H" + ] + } + } + } + ], + "type": "table" + } + ], + "refresh": "5m", + "schemaVersion": 30, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Observatorium", + "value": "Observatorium" + }, + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "ups-jb", + "value": "ups-jb" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "description": null, + "error": null, + 
"hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "openshift-monitoring", + "value": "openshift-monitoring" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "alertmanager-main-0", + "value": "alertmanager-main-0" + }, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "options": [], + "query": { + "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "refId": "StandardVariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Pod - OCP 3.11", + "uid": "0c2f0c6c0e0c0cccf43e658dfeda09a5", + "version": 1 + } diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml index f1a21498f..43ea734c1 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-pod.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -19,13 +25,17 @@ data: "editable": true, "gnetId": 12120, "graphTooltip": 0, - "id": 7, - "iteration": 1621618845678, + "id": 14, + "iteration": 1647365265618, "links": [], "panels": [ { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -46,7 +56,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -78,7 +87,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -180,6 +189,10 @@ data: { "collapsed": false, "datasource": 
"$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -200,7 +213,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -233,7 +245,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -305,6 +317,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -326,6 +342,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -509,7 +526,7 @@ data: "showHeader": true, "sortBy": [] }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}) by (container)", @@ -594,6 +611,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -614,7 +635,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -646,7 +666,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -750,6 +770,10 @@ data: { "collapsed": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -771,6 +795,7 @@ data: }, "custom": { "align": null, + "displayMode": "auto", "filterable": false }, "decimals": 2, @@ -1025,7 +1050,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\"}) by (container)", @@ -1137,7 +1162,7 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", "tags": [ "kubernetes-mixin" @@ -1160,7 +1185,7 @@ data: "options": [], "query": "prometheus", "refresh": 2, - "regex": "", + "regex": "Observatorium-Dynamic", "skipUrlSync": false, "type": "datasource" }, @@ -1172,7 +1197,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "label_values(cluster)", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -1182,7 +1207,7 @@ data: "name": "cluster", "options": [], "query": { - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "StandardVariableQuery" }, "refresh": 2, @@ -1190,7 +1215,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1221,7 +1245,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1252,7 +1275,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -1288,7 +1310,7 @@ data: "timezone": "", "title": "Kubernetes / Compute Resources / Pod", "uid": "6581e46e4e5c7ba40a07646395ef7b23", - "version": 2 + "version": 3 } kind: ConfigMap metadata: diff --git 
a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml index 1562edbbd..bf56bfe9e 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-compute-resources-workload.yaml @@ -11,6 +11,12 @@ data: "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, "type": "dashboard" } ] @@ -19,13 +25,17 @@ data: "editable": true, "gnetId": 12121, "graphTooltip": 0, - "id": 8, - "iteration": 1621618974171, + "id": 16, + "iteration": 1647365265745, "links": [], "panels": [ { "collapsed": false, "datasource": "$dashboard", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -46,7 +56,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -79,7 +88,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -143,6 +152,10 @@ data: { "collapsed": false, "datasource": "$dashboard", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -161,6 +174,7 @@ data: "defaults": { "custom": { "align": "left", + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -247,7 +261,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", @@ -351,6 +365,10 @@ data: { "collapsed": false, "datasource": "$dashboard", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -371,7 +389,6 @@ data: "datasource": "$datasource", "fieldConfig": { "defaults": { - "custom": {}, "links": [] }, "overrides": [] @@ -403,7 +420,7 @@ data: "alertThreshold": true }, "percentage": false, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -467,6 +484,10 @@ data: { "collapsed": false, "datasource": "$dashboard", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, "gridPos": { "h": 1, "w": 24, @@ -485,6 +506,7 @@ data: "defaults": { "custom": { "align": "left", + "displayMode": "auto", "filterable": false }, "mappings": [], @@ -572,7 +594,7 @@ data: "options": { "showHeader": true }, - "pluginVersion": "7.4.2", + "pluginVersion": "8.1.3", "targets": [ { "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", @@ -673,7 +695,7 @@ data: } ], "refresh": "5m", - "schemaVersion": 27, + "schemaVersion": 30, "style": "dark", "tags": [ "kubernetes-mixin" @@ -696,7 +718,7 @@ data: "options": [], "query": 
"prometheus", "refresh": 2, - "regex": "", + "regex": "Observatorium-Dynamic", "skipUrlSync": false, "type": "datasource" }, @@ -708,7 +730,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -718,7 +740,7 @@ data: "name": "cluster", "options": [], "query": { - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "Prometheus-cluster-Variable-Query" }, "refresh": 2, @@ -726,7 +748,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -757,7 +778,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -788,7 +808,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -819,7 +838,6 @@ data: "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", - "tags": [], "tagsQuery": "", "type": "query", "useTags": false @@ -855,7 +873,7 @@ data: "timezone": "browser", "title": "Kubernetes / Compute Resources / Workload", "uid": "a164a7f0339f99e89cea5cb47e9be617", - "version": 2 + "version": 3 } kind: ConfigMap metadata: diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-namespaces-in-cluster-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-namespaces-in-cluster-ocp311.yaml new file mode 100644 index 000000000..fd662352f --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-namespaces-in-cluster-ocp311.yaml @@ -0,0 +1,975 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-k8s-namespaces-in-cluster-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + k8s-namespaces-in-cluster-ocp311.json: | + { + "__inputs": [], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1570200640181, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 5, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (phase) ( \n ( kube_pod_status_phase{namespace=~\"$Namespace\",cluster=\"$cluster\"}\n or\n label_replace ( kube_pod_status_ready{namespace=~\"$Namespace\",condition=\"true\",cluster=\"$cluster\"}, \"phase\",\"Ready\",\"\",\"\" )\n )\n) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{phase}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pod Phase", + "tooltip": { + "shared": true, 
+ "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "locale", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 17, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (increase(kube_pod_container_status_restarts_total{cluster=\"$cluster\",namespace=~\"$Namespace\"}[1h])) by (namespace) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{namespace}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pod Restarts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "locale", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 11, + "panels": [], + "repeat": null, + "title": "CPU", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (namespace) (\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=~\"$Namespace\", cluster=\"$cluster\"}\n ) ", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{namespace}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "type": "dashboard" + } + ], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "scroll": true, + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 3, + "desc": false + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to Namespaces", + "linkUrl": "/d/pods-in-namespaces/kubernetes-pods-in-namespace-ocp-3-11?var-datasource=$datasource&var-Namespace=$__cell&orgId=1&refresh=5m", + "pattern": "namespace", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum by (namespace) (\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=~\"$Namespace\", cluster=\"$cluster\"}\n ) ", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum by (namespace) (kube_pod_container_resource_requests_cpu_cores{namespace=~\"$Namespace\", cluster=\"$cluster\"}) ", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + 
}, + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits_cpu_cores{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 13, + "panels": [], + "repeat": null, + "title": "Memory", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 13 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{container_name!=\"\",namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{namespace}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 9, + "legend": { + "avg": false, + "current": 
false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "scroll": true, + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to Namespaces", + "linkUrl": "/d/pods-in-namespaces/kubernetes-pods-in-namespace-ocp-3-11?var-datasource=$datasource&var-Namespace=$__cell&orgId=1&refresh=5m", + "pattern": "namespace", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(container_memory_rss{container_name!=\"\",namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{container_name!=\"\",namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": 
"sum(kube_pod_container_resource_limits_memory_bytes{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{container_name!=\"\",namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=~\"$Namespace\", cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Requests by Namespace", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "10s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": {}, + "datasource": "$datasource", + "definition": "label_values(kube_namespace_labels{cluster=\"$cluster\"},namespace)", + "hide": 0, + "includeAll": true, + "label": "Project", + "multi": true, + "name": "Namespace", + "options": [], + "query": "label_values(kube_namespace_labels{cluster=\"$cluster\"},namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Namespaces in Cluster - OCP 3.11", + "uid": "namespaces-in-cluster", + "version": 17 + } diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml index 95c10a945..38a1afb36 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-networking-cluster.yaml @@ -1010,7 +1010,7 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "label_values(cluster)", + "definition": 
"label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "description": null, "error": null, "hide": 0, @@ -1020,7 +1020,7 @@ data: "name": "cluster", "options": [], "query": { - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refId": "Observatorium-cluster-Variable-Query" }, "refresh": 2, diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-pods-in-namespace-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-pods-in-namespace-ocp311.yaml new file mode 100644 index 000000000..cae929a14 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-pods-in-namespace-ocp311.yaml @@ -0,0 +1,1475 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-k8s-pods-in-namespace-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + k8s-pods-in-namespace-ocp311.json: | + { + "__inputs": [], + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1570200495937, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 5, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (phase) ( \n ( kube_pod_status_phase{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}\n or\n label_replace ( kube_pod_status_ready{namespace=\"$Namespace\",pod=~\"$Pod\",condition=\"true\",cluster=\"$cluster\"}, \"phase\",\"Ready\",\"\",\"\" )\n )\n) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{phase}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pod Phase", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "locale", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (increase(kube_pod_container_status_restarts_total{namespace=~\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"} [1h] )) by (pod) 
", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{pod}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pod Restarts", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "locale", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 4, + "panels": [], + "repeat": null, + "title": "CPU Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 0, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",cluster=\"$cluster\"}) by (pod_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod_name}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "scroll": true, + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": 
"short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "pod", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) \n/ sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) \n/ sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota (Pods)", + "tooltip": { + "shared": true, + 
"sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 11, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "scroll": true, + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 5, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "container", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pod", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(\n 
label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",container_name!=\"POD\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")\n , \"container\", \"$1\", \"container_name\", \"(.*)\")\n ) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(\n label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",container_name!=\"POD\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")\n ,\"container\", \"$1\", \"container_name\", \"(.*)\")\n ) by (pod,container) \n/ sum(kube_pod_container_resource_requests_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(\n label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$Namespace\",pod_name=~\"$Pod\",container_name!=\"POD\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")\n ,\"container\", \"$1\", \"container_name\", \"(.*)\")\n ) by (pod,container) \n/ sum(kube_pod_container_resource_limits_cpu_cores{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota (Containers)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 18 + }, + "id": 6, + "panels": [], + "repeat": null, + "title": "Memory Usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 19 + }, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(container_memory_usage_bytes{namespace=\"$Namespace\", container_name!=\"\",pod_name=~\"$Pod\",cluster=\"$cluster\"}) by (pod_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{pod_name}}", + "legendLink": null, + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 19 + }, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "scroll": true, + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "pod", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": 
"/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$Namespace\",container_name!=\"\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$Namespace\",container_name!=\"\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) \n/ sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod) ", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_usage_bytes{namespace=\"$Namespace\",container_name!=\"\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) \n/ sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod) ", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota (Pods)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "columns": [], + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "pageSize": null, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "scroll": true, + "seriesOverrides": [], + "showHeader": true, + "sort": { + "col": 2, + "desc": true + }, + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory 
Requests", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": null, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "pod", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "container", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(\n label_replace(container_memory_usage_bytes{namespace=\"$Namespace\",container_name!=\"\",pod_name=~\"$Pod\",container_name!=\"POD\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")\n ,\"container\", \"$1\", \"container_name\", \"(.*)\")\n ) by (pod,container) ", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(\n label_replace(container_memory_usage_bytes{namespace=\"$Namespace\",container_name!~\"POD|\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")\n ,\"container\", \"$1\", \"container_name\", \"(.*)\")\n ) by (pod,container) \n/ sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(\n 
label_replace(container_memory_usage_bytes{namespace=\"$Namespace\",container_name!~\"POD|\",pod_name=~\"$Pod\",cluster=\"$cluster\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")\n ,\"container\", \"$1\", \"container_name\", \"(.*)\")\n ) by (pod,container) \n/ sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$Namespace\",pod=~\"$Pod\",cluster=\"$cluster\"}) by (pod,container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota (Containers)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "refresh": "10s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "definition": "label_values(kube_namespace_labels{cluster=\"$cluster\"},namespace)", + "hide": 0, + "includeAll": false, + "label": "Project", + "multi": false, + "name": "Namespace", + "options": [], + "query": "label_values(kube_namespace_labels{cluster=\"$cluster\"},namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{cluster=\"$cluster\",namespace=\"$Namespace\"}, pod) ", + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "Pod", + "options": [], + "query": "label_values(kube_pod_info{cluster=\"$cluster\",namespace=\"$Namespace\"}, pod) ", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "selected": true, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Pods in Namespace - OCP 3.11", + "uid": "pods-in-namespaces", + "version": 33 + } diff --git 
a/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-summary-by-node-ocp311.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-summary-by-node-ocp311.yaml new file mode 100644 index 000000000..3feaeb446 --- /dev/null +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-k8s-summary-by-node-ocp311.yaml @@ -0,0 +1,2216 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: grafana-dashboard-k8s-summary-by-node-ocp311 + namespace: open-cluster-management-observability + annotations: + observability.open-cluster-management.io/dashboard-folder: "OCP 3.11" +data: + k8s-summary-by-node-ocp311.json: | + { + "__inputs": [], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "iteration": 1569876999182, + "links": [], + "panels": [ + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 0 + }, + "id": 576, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": null, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": [], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Node", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "node", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "max by (node,nodetype)(label_join(label_replace(label_replace(label_replace(kube_node_labels{cluster=\"$cluster\"},\"nodetype_master\",\"master\",\"label_node_role_kubernetes_io_master\",\"true\"),\"nodetype_infra\",\"infra\",\"label_node_role_kubernetes_io_infra\",\"true\"),\"nodetype_compute\",\"compute\",\"label_node_role_kubernetes_io_compute\",\"true\"),\"nodetype\", \" \", \"nodetype_master\", \"nodetype_infra\", \"nodetype_compute\"))", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "title": "Nodes", + "transform": "table", + "type": "table" + }, + { + "cacheTimeout": null, + "colorBackground": true, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 8, + "y": 0 + }, + "id": 404, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(max(kube_node_status_condition{status=\"true\",condition!=\"Ready\",cluster=\"$cluster\"} > 0) by (node))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0,1", + "title": "Node health", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "OK", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "columns": [], + "datasource": "$datasource", + "fontSize": "100%", + "gridPos": { + "h": 5, + "w": 8, + "x": 12, + "y": 0 + }, + "id": 444, + "links": [], + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 2, + "desc": true + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "max(kube_node_status_condition{status=\"true\",condition!=\"Ready\",cluster=\"$cluster\"} > 0) by (node,condition)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "refId": "A" + } + ], + "title": "Node issues", + "transform": "table", + "type": "table" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 2, + "panels": [], + "repeat": "nodetype", + "title": "Summary - $nodetype", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 6 + }, + "id": 4, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum( \n node:node_cpu_utilisation:avg1m{cluster=\"$cluster\"} * on (node) \n (kube_node_status_capacity_cpu_cores{cluster=\"$cluster\"} * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}) \n) \n/ \nsum( \n kube_node_status_capacity_cpu_cores{cluster=\"$cluster\"} * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0.8,0.9", + "title": "CPU node %", + "type": "singlestat", + "valueFontSize": 
"80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 4, + "y": 6 + }, + "id": 9, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ( \n label_replace( \n rate(container_cpu_usage_seconds_total{job=\"kubelet\", image!=\"\", container_name!=\"\",cluster=\"$cluster\"}[$__rate_interval]), \n \"pod\", \"$1\", \"pod_name\", \"(.*)\" \n ) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n) \n/ \nsum ( \n kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0.8,0.9", + "title": "CPU usage/allocatable %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 3, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 8, + "y": 6 + }, + "id": 102, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ( \n label_replace( \n rate(container_cpu_usage_seconds_total{job=\"kubelet\", image!=\"\", container_name!=\"\",cluster=\"$cluster\"}[$__rate_interval]), \n \"pod\", \"$1\", \"pod_name\", \"(.*)\" \n ) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)\n/\nsum( \n 
kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "2,2.5", + "title": "CPU usage/requests %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 12, + "y": 6 + }, + "id": 123, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum( \n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)\n/ \nsum ( \n kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n )", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0.8,0.9", + "title": "CPU requests/allocatable %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 5, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 6 + }, + "id": 172, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum( \n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)\n/ \nsum ( \n kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"} \n * on (node) group_left 
kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n )", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "3,4", + "title": "CPU limits/allocatable %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 6 + }, + "id": 37, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\nkube_node_status_capacity_cpu_cores{cluster=\"$cluster\"}\n* on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total", + "refId": "A" + }, + { + "expr": "sum(\nkube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"}\n* on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Allocatable", + "refId": "B" + }, + { + "expr": "sum( \n node:node_cpu_utilisation:avg1m{cluster=\"$cluster\"} \n * on (node) (\n kube_node_status_capacity_cpu_cores{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n )\n) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Node usage", + "refId": "C" + }, + { + "expr": "sum (\nlabel_replace( \n rate(container_cpu_usage_seconds_total{job=\"kubelet\", image!=\"\", container_name!=\"\",cluster=\"$cluster\"}[$__rate_interval]), \n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n)\n* on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"}\n* on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Usage", + "refId": "D" + }, + { + "expr": "sum( \n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Requests", + "refId": "E" + }, + { + "expr": "sum( \n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Limits", + "refId": "F" + }, + { + "expr": "", + "format": "time_series", + "intervalFactor": 1, + "refId": "G" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, 
+ "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 12 + }, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum( \n (node_memory_MemTotal_bytes{cluster=\"$cluster\"} - node_memory_MemAvailable_bytes{cluster=\"$cluster\"}) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n) \n/ \nsum( \n node:node_memory_bytes_total:sum{cluster=\"$cluster\"} * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0.8,0.9", + "title": "Memory node %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 4, + "y": 12 + }, + "id": 25, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ( \n label_replace( \n container_memory_usage_bytes{pod_name!=\"\",container_name!=\"\",cluster=\"$cluster\"}, \n \"pod\", \"$1\", \"pod_name\", \"(.*)\" ) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n ) \n/ \nsum ( \n 
kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0.8,0.9", + "title": "Memory allocatable %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 3, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 8, + "y": 12 + }, + "id": 66, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum ( \n label_replace( \n container_memory_usage_bytes{pod_name!=\"\",container_name!=\"\",cluster=\"$cluster\"}, \n \"pod\", \"$1\", \"pod_name\", \"(.*)\" ) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n ) \n/\nsum( \n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "2,2.5", + "title": "Memory usage/requests %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 1, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 12, + "y": 12 + }, + "id": 83, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum( \n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}\n * on (node) group_left 
kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n )\n/\nsum ( \n kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n )", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "0.8,0.9", + "title": "Memory requests/allocatable %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 1, + "format": "percentunit", + "gauge": { + "maxValue": 5, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 12 + }, + "id": 199, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum( \n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n )\n/\nsum ( \n kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n )", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": "3,4", + "title": "Memory limits/allocatable %", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 12 + }, + "id": 51, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n kube_node_status_capacity_memory_bytes{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Total", + "refId": "A" + }, + { + "expr": "sum(\n kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Allocatable", + "refId": "B" + }, + { + "expr": "sum( \n 
(node_memory_MemTotal_bytes{cluster=\"$cluster\"} - node_memory_MemAvailable_bytes{cluster=\"$cluster\"}) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Node usage", + "refId": "C" + }, + { + "expr": "sum ( \n label_replace( \n container_memory_usage_bytes{pod_name!=\"\",container_name!=\"\",cluster=\"$cluster\"}, \n \"pod\", \"$1\", \"pod_name\", \"(.*)\" \n ) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Usage", + "refId": "D" + }, + { + "expr": "sum( \n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Requests", + "refId": "E" + }, + { + "expr": "sum( \n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Limits", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 18 + }, + "id": 147, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (node) (\n node:node_net_utilisation:sum_irate{cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network (Total)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 18 + }, + "id": 228, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (node) (\n irate(node_network_receive_bytes{job=\"node-exporter\",device!~\"veth.+\",cluster=\"$cluster\"}[$__rate_interval])\n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network (Receive)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 18 + }, + "id": 259, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (node) (\n irate(node_network_transmit_bytes{job=\"node-exporter\",device!~\"veth.+\",cluster=\"$cluster\"}[$__rate_interval]) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network (Transmit)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": 
false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 24 + }, + "id": 292, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (node) ( \n irate(node_disk_bytes_read[$__rate_interval]) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk bytes read", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 24 + }, + "id": 327, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (node) ( \n irate(node_disk_bytes_written[$__rate_interval]) \n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk bytes write", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 24 + }, + "id": 364, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + 
"lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeatedByRow": true, + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_disk_utilisation:avg_irate{cluster=\"$cluster\"}\n* on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} \n", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": "", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 30 + }, + "id": 446, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (node) (\n node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"}\n * on (namespace, pod) group_left kube_pod_status_ready{condition=\"true\",cluster=\"$cluster\"}\n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{node}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pods ready", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 30 + }, + "id": 487, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (phase) ( \n ( kube_pod_status_phase{cluster=\"$cluster\"}\n or\n label_replace ( 
kube_pod_status_ready{condition=\"true\",cluster=\"$cluster\"}, \"phase\",\"Ready\",\"\",\"\" )\n )\n * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"} \n * on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"}\n) ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{phase}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pods phase", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 30 + }, + "id": 530, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max by (device,pod,namespace)(\n node_filesystem_size{fstype=~\"ext[234]|btrfs|xfs|zfs|nfs.*\",cluster=\"$cluster\"} - node_filesystem_avail{fstype=~\"ext[234]|btrfs|xfs|zfs|nfs.*\",cluster=\"$cluster\"}\n)\n/ \nmax by (device,pod,namespace)(\n node_filesystem_size{fstype=~\"ext[234]|btrfs|xfs|zfs|nfs.*\",cluster=\"$cluster\"}\n)\n* on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"}\n* on (node) group_left kube_node_labels{label_node_role_kubernetes_io_$nodetype=\"true\",cluster=\"$cluster\"} ", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{node}} - {{device}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Filesystem %", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 16, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": {}, + "datasource": "$datasource", + "definition": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(kube_pod_info{clusterType=\"ocp3\"},cluster)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": 
false + }, + { + "allValue": "", + "current": { + "text": "master + infra + compute", + "value": [ + "master", + "infra", + "compute" + ] + }, + "hide": 0, + "includeAll": false, + "label": "Node type", + "multi": true, + "name": "nodetype", + "options": [ + { + "selected": true, + "text": "master", + "value": "master" + }, + { + "selected": true, + "text": "infra", + "value": "infra" + }, + { + "selected": true, + "text": "compute", + "value": "compute" + } + ], + "query": "master,infra,compute", + "skipUrlSync": false, + "type": "custom" + }, + { + "current": { + "selected": true, + "text": "prometheus", + "value": "prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Summary by Node Type - OCP 3.11", + "uid": "summary-by-nodetype", + "version": 42 + } diff --git a/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml b/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml index 68909ae10..57fa6a1b2 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/dash-node-rsrc-use.yaml @@ -1056,14 +1056,14 @@ data: "value": "" }, "datasource": "$datasource", - "definition": "label_values(cluster)", + "definition": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "hide": 0, "includeAll": false, "label": "Cluster", "multi": false, "name": "cluster", "options": [], - "query": "label_values(cluster)", + "query": "label_values(kube_pod_info{clusterType!=\"ocp3\"},cluster)", "refresh": 2, "regex": "", "skipUrlSync": false, diff --git a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml index 4459eea38..26f60e181 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/deployment.yaml @@ -43,7 +43,7 @@ spec: - args: - -config=/etc/grafana/grafana.ini image: quay.io/stolostron/grafana:2.4.0-SNAPSHOT-2021-09-23-07-02-14 - imagePullPolicy: Always + imagePullPolicy: IfNotPresent name: grafana ports: - containerPort: 3001 @@ -70,7 +70,7 @@ spec: fieldRef: fieldPath: metadata.namespace image: quay.io/stolostron/grafana-dashboard-loader:2.3.0-SNAPSHOT-2021-07-26-18-43-26 - imagePullPolicy: Always + imagePullPolicy: IfNotPresent resources: requests: cpu: 4m diff --git a/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml b/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml index 247ea1827..3c6c2f6c2 100644 --- a/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml +++ b/operators/multiclusterobservability/manifests/base/grafana/kustomization.yaml @@ -8,7 +8,6 @@ resources: - service.yaml - dash-acm-optimization-overview.yaml - dash-acm-clusters-overview.yaml -- dash-acm-clusters-overview-ocp311.yaml - dash-k8s-etcd.yaml - dash-k8s-apiserver.yaml - dash-k8s-networking-cluster.yaml @@ -22,3 +21,11 
@@ resources: - dash-k8s-service-level-overview-api-server-cluster.yaml - dash-cluster-rsrc-use.yaml - dash-node-rsrc-use.yaml +- dash-acm-clusters-overview-ocp311.yaml +- dash-acm-optimization-overview-ocp311.yaml +- dash-k8s-capacity-planning-ocp311.yaml +- dash-k8s-compute-resources-namespace-pods-ocp311.yaml +- dash-k8s-compute-resources-pod-ocp311.yaml +- dash-k8s-namespaces-in-cluster-ocp311.yaml +- dash-k8s-pods-in-namespace-ocp311.yaml +- dash-k8s-summary-by-node-ocp311.yaml diff --git a/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml b/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml index 09b88f218..9ab454605 100644 --- a/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml +++ b/operators/multiclusterobservability/manifests/base/proxy/deployment.yaml @@ -42,7 +42,7 @@ spec: containers: - name: rbac-query-proxy image: quay.io/stolostron/rbac-query-proxy:2.3.0-SNAPSHOT-2021-07-26-18-43-26 - imagePullPolicy: Always + imagePullPolicy: IfNotPresent args: - "--listen-address=0.0.0.0:8080" - "--metrics-server=https://{{OBSERVATORIUM_NAME}}-observatorium-api.{{MCO_NAMESPACE}}.svc.cluster.local:8080/api/metrics/v1/default" @@ -88,8 +88,8 @@ spec: - --skip-provider-button=true - --openshift-ca=/etc/pki/tls/cert.pem - --openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - image: quay.io/stolostron/origin-oauth-proxy:2.0.11-SNAPSHOT-2021-04-29-18-29-17 - imagePullPolicy: Always + image: quay.io/stolostron/origin-oauth-proxy:4.5 + imagePullPolicy: IfNotPresent name: oauth-proxy ports: - containerPort: 8443 diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml index bfbad4dea..26f9ec846 100644 --- a/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml +++ b/operators/multiclusterobservability/manifests/endpoint-observability/images.yaml @@ -3,9 +3,10 @@ apiVersion: v1 metadata: name: images-list data: - prometheus: "quay.io/stolostron/prometheus:2.4.0-SNAPSHOT-2021-08-19-08-22-43" - prometheus-config-reloader: "quay.io/openshift/origin-configmap-reloader:4.5.0" - kube_state_metrics: "quay.io/stolostron/kube-state-metrics:2.4.0-SNAPSHOT-2021-08-19-08-22-43" - node_exporter: "quay.io/stolostron/node-exporter:2.4.0-SNAPSHOT-2021-08-19-08-22-43" - kube_rbac_proxy: "quay.io/stolostron/kube-rbac-proxy:2.4.0-SNAPSHOT-2021-08-19-08-22-43" - metrics_collector: "quay.io/stolostron/metrics-collector:2.4.0-SNAPSHOT-2021-08-19-08-22-43" + prometheus: "quay.io/stolostron/prometheus:2.5.0-SNAPSHOT-2022-02-03-20-02-33" + prometheus_config_reloader: "quay.io/stolostron/prometheus-config-reloader:2.5.0-SNAPSHOT-2022-02-03-20-02-33" + prometheus_operator: "quay.io/stolostron/prometheus-operator:2.5.0-SNAPSHOT-2022-02-03-20-02-33" + kube_state_metrics: "quay.io/stolostron/kube-state-metrics:2.5.0-SNAPSHOT-2022-02-03-20-02-33" + node_exporter: "quay.io/stolostron/node-exporter:2.5.0-SNAPSHOT-2022-02-03-20-02-33" + kube_rbac_proxy: "quay.io/stolostron/kube-rbac-proxy:2.5.0-SNAPSHOT-2022-02-03-20-02-33" + metrics_collector: "quay.io/stolostron/metrics-collector:2.5.0-SNAPSHOT-2022-02-03-20-02-33" diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml index e107c7db9..9d65a10a6 100644 --- 
a/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml +++ b/operators/multiclusterobservability/manifests/endpoint-observability/operator.yaml @@ -23,7 +23,7 @@ spec: requests: cpu: 2m memory: 50Mi - imagePullPolicy: Always + imagePullPolicy: IfNotPresent ports: - containerPort: 8383 name: metrics diff --git a/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml b/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml index c4e7c0ad9..63660de66 100644 --- a/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml +++ b/operators/multiclusterobservability/manifests/endpoint-observability/role.yaml @@ -4,11 +4,11 @@ metadata: name: open-cluster-management:endpoint-observability-operator rules: - apiGroups: - - "" + - apiextensions.k8s.io resources: - - namespaces + - customresourcedefinitions verbs: - - get + - '*' - apiGroups: - "" resources: @@ -25,14 +25,10 @@ rules: - pods - secrets - services + - services/finalizers - serviceaccounts verbs: - - create - - get - - list - - watch - - delete - - update + - '*' - apiGroups: - apps resources: @@ -41,12 +37,7 @@ rules: - replicasets - statefulsets verbs: - - get - - list - - watch - - create - - update - - delete + - '*' - apiGroups: - rbac.authorization.k8s.io resources: @@ -55,12 +46,7 @@ rules: - roles - rolebindings verbs: - - get - - list - - watch - - create - - update - - delete + - '*' - apiGroups: - observability.open-cluster-management.io resources: @@ -103,12 +89,7 @@ rules: resources: - leases verbs: - - get - - list - - watch - - create - - update - - delete + - '*' - apiGroups: - "" resources: @@ -152,9 +133,7 @@ rules: - replicationcontrollers - resourcequotas verbs: - - get - - list - - watch + - '*' - apiGroups: - admissionregistration.k8s.io resources: @@ -208,3 +187,20 @@ rules: - get - list - watch +- apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - alertmanagers/finalizers + - alertmanagerconfigs + - prometheuses + - prometheuses/finalizers + - thanosrulers + - thanosrulers/finalizers + - servicemonitors + - podmonitors + - probes + - prometheusrules + verbs: + - '*' + diff --git a/operators/multiclusterobservability/pkg/certificates/cert_agent.go b/operators/multiclusterobservability/pkg/certificates/cert_agent.go index 417131611..1665d361d 100644 --- a/operators/multiclusterobservability/pkg/certificates/cert_agent.go +++ b/operators/multiclusterobservability/pkg/certificates/cert_agent.go @@ -18,7 +18,10 @@ const ( type ObservabilityAgent struct{} -func (o *ObservabilityAgent) Manifests(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { +func (o *ObservabilityAgent) Manifests( + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn, +) ([]runtime.Object, error) { return nil, nil } diff --git a/operators/multiclusterobservability/pkg/certificates/cert_controller.go b/operators/multiclusterobservability/pkg/certificates/cert_controller.go index 2a3f0e3fd..49815a3d2 100644 --- a/operators/multiclusterobservability/pkg/certificates/cert_controller.go +++ b/operators/multiclusterobservability/pkg/certificates/cert_controller.go @@ -68,8 +68,12 @@ func Start(c client.Client, ingressCtlCrdExists bool) { log.Error(err, "Failed to create kube client") os.Exit(1) } - watchlist := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "secrets", config.GetDefaultNamespace(), - 
fields.OneTermEqualSelector("metadata.namespace", config.GetDefaultNamespace())) + watchlist := cache.NewListWatchFromClient( + kubeClient.CoreV1().RESTClient(), + "secrets", + config.GetDefaultNamespace(), + fields.OneTermEqualSelector("metadata.namespace", config.GetDefaultNamespace()), + ) _, controller := cache.NewInformer( watchlist, &v1.Secret{}, @@ -169,7 +173,11 @@ func onDelete(c client.Client) func(obj interface{}) { Name: config.GetMonitoringCRName(), }, mco) if err == nil { - log.Info("secret for ca certificate deleted by mistake, add the cert back to the new created one", "name", s.Name) + log.Info( + "secret for ca certificate deleted by mistake, add the cert back to the new created one", + "name", + s.Name, + ) i := 0 for { caSecret := &v1.Secret{} @@ -182,8 +190,10 @@ func onDelete(c client.Client) func(obj interface{}) { err = c.Update(context.TODO(), caSecret) if err != nil { log.Error(err, "Failed to update secret for ca certificate", "name", s.Name) + i++ + } else { + break } - break } else { // wait mco operator recreate the ca certificate at most 30 seconds if i < 6 { diff --git a/operators/multiclusterobservability/pkg/certificates/certificates.go b/operators/multiclusterobservability/pkg/certificates/certificates.go index 99b4f44ac..4a8afdde5 100644 --- a/operators/multiclusterobservability/pkg/certificates/certificates.go +++ b/operators/multiclusterobservability/pkg/certificates/certificates.go @@ -46,7 +46,12 @@ var ( serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) ) -func CreateObservabilityCerts(c client.Client, scheme *runtime.Scheme, mco *mcov1beta2.MultiClusterObservability, ingressCtlCrdExists bool) error { +func CreateObservabilityCerts( + c client.Client, + scheme *runtime.Scheme, + mco *mcov1beta2.MultiClusterObservability, + ingressCtlCrdExists bool, +) error { config.SetCertDuration(mco.Annotations) @@ -209,6 +214,9 @@ func createCertSecret(c client.Client, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: config.GetDefaultNamespace(), + Labels: map[string]string{ + config.BackupLabelName: config.BackupLabelValue, + }, }, Data: map[string][]byte{ "ca.crt": caCertBytes, @@ -340,7 +348,11 @@ func getCA(c client.Client, isServer bool) (*x509.Certificate, *rsa.PrivateKey, caCertName = clientCACerts } caSecret := &corev1.Secret{} - err := c.Get(context.TODO(), types.NamespacedName{Namespace: config.GetDefaultNamespace(), Name: caCertName}, caSecret) + err := c.Get( + context.TODO(), + types.NamespacedName{Namespace: config.GetDefaultNamespace(), Name: caCertName}, + caSecret, + ) if err != nil { log.Error(err, "Failed to get ca secret", "name", caCertName) return nil, nil, nil, err diff --git a/operators/multiclusterobservability/pkg/config/config.go b/operators/multiclusterobservability/pkg/config/config.go index 491cf9d53..bdaead480 100644 --- a/operators/multiclusterobservability/pkg/config/config.go +++ b/operators/multiclusterobservability/pkg/config/config.go @@ -98,6 +98,8 @@ const ( ValidatingWebhookConfigurationName = "multicluster-observability-operator" WebhookServiceName = "multicluster-observability-webhook-service" + BackupLabelName = "cluster.open-cluster-management.io/backup" + BackupLabelValue = "" ) const ( @@ -123,9 +125,10 @@ const ( MemcachedExporterKey = "memcached_exporter" MemcachedExporterImgTag = "v0.9.0" - GrafanaImgKey = "grafana" - GrafanaDashboardLoaderName = "grafana-dashboard-loader" - GrafanaDashboardLoaderKey = "grafana_dashboard_loader" + GrafanaImgKey = "grafana" + GrafanaDashboardLoaderName = 
"grafana-dashboard-loader" + GrafanaDashboardLoaderKey = "grafana_dashboard_loader" + GrafanaCustomDashboardLabel = "grafana-custom-dashboard" AlertManagerImgName = "prometheus-alertmanager" AlertManagerImgKey = "prometheus_alertmanager" @@ -220,7 +223,7 @@ const ( DeleteDelay = "48h" BlockDuration = "2h" - DefaultImagePullPolicy = "Always" + DefaultImagePullPolicy = "IfNotPresent" DefaultImagePullSecret = "multiclusterhub-operator-pull-secret" ResourceLimits = "limits" @@ -238,6 +241,11 @@ const ( StorageVersionMigrationCrdName = "storageversionmigrations.migration.k8s.io" ) +const ( + ResourceTypeConfigMap = "ConfigMap" + ResourceTypeSecret = "Secret" +) + // ObjectStorgeConf is used to Unmarshal from bytes to do validation type ObjectStorgeConf struct { Type string `yaml:"type"` @@ -277,6 +285,19 @@ var ( MemoryLimitMB = int32(1024) ConnectionLimit = int32(1024) MaxItemSize = "1m" + + BackupResourceMap = map[string]string{ + AllowlistCustomConfigMapName: ResourceTypeConfigMap, + AlertRuleCustomConfigMapName: ResourceTypeConfigMap, + AlertmanagerConfigName: ResourceTypeConfigMap, + + AlertmanagerRouteBYOCAName: ResourceTypeSecret, + AlertmanagerRouteBYOCERTName: ResourceTypeSecret, + ProxyRouteBYOCAName: ResourceTypeSecret, + ProxyRouteBYOCERTName: ResourceTypeSecret, + } + + CollectRulesEnabled bool = true ) func GetReplicas(component string, advanced *observabilityv1beta2.AdvancedConfig) *int32 { @@ -440,8 +461,13 @@ func GetObsAPIHost(client client.Client, namespace string) (string, error) { err := client.Get(context.TODO(), types.NamespacedName{Name: obsAPIGateway, Namespace: namespace}, found) if err != nil && errors.IsNotFound(err) { - // if the observatorium-api router is not created yet, fallback to get host from the domain of ingresscontroller - domain, err := getDomainForIngressController(client, OpenshiftIngressOperatorCRName, OpenshiftIngressOperatorNamespace) + // if the observatorium-api router is not created yet, fallback to get host + // from the domain of ingresscontroller + domain, err := getDomainForIngressController( + client, + OpenshiftIngressOperatorCRName, + OpenshiftIngressOperatorNamespace, + ) if err != nil { return "", nil } @@ -467,7 +493,11 @@ func GetAlertmanagerEndpoint(client client.Client, namespace string) (string, er err := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteName, Namespace: namespace}, found) if err != nil && errors.IsNotFound(err) { // if the alertmanager router is not created yet, fallback to get host from the domain of ingresscontroller - domain, err := getDomainForIngressController(client, OpenshiftIngressOperatorCRName, OpenshiftIngressOperatorNamespace) + domain, err := getDomainForIngressController( + client, + OpenshiftIngressOperatorCRName, + OpenshiftIngressOperatorNamespace, + ) if err != nil { return "", nil } @@ -496,14 +526,26 @@ func getDomainForIngressController(client client.Client, name, namespace string) func GetAlertmanagerRouterCA(client client.Client) (string, error) { amRouteBYOCaSrt := &corev1.Secret{} amRouteBYOCertSrt := &corev1.Secret{} - err1 := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteBYOCAName, Namespace: GetDefaultNamespace()}, amRouteBYOCaSrt) - err2 := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagerRouteBYOCERTName, Namespace: GetDefaultNamespace()}, amRouteBYOCertSrt) + err1 := client.Get( + context.TODO(), + types.NamespacedName{Name: AlertmanagerRouteBYOCAName, Namespace: GetDefaultNamespace()}, + amRouteBYOCaSrt, + ) + err2 := 
client.Get( + context.TODO(), + types.NamespacedName{Name: AlertmanagerRouteBYOCERTName, Namespace: GetDefaultNamespace()}, + amRouteBYOCertSrt, + ) if err1 == nil && err2 == nil { return string(amRouteBYOCaSrt.Data["tls.crt"]), nil } ingressOperator := &operatorv1.IngressController{} - err := client.Get(context.TODO(), types.NamespacedName{Name: OpenshiftIngressOperatorCRName, Namespace: OpenshiftIngressOperatorNamespace}, ingressOperator) + err := client.Get( + context.TODO(), + types.NamespacedName{Name: OpenshiftIngressOperatorCRName, Namespace: OpenshiftIngressOperatorNamespace}, + ingressOperator, + ) if err != nil { return "", err } @@ -515,7 +557,11 @@ func GetAlertmanagerRouterCA(client client.Client) (string, error) { } routerCASecret := &corev1.Secret{} - err = client.Get(context.TODO(), types.NamespacedName{Name: routerCASrtName, Namespace: OpenshiftIngressNamespace}, routerCASecret) + err = client.Get( + context.TODO(), + types.NamespacedName{Name: routerCASrtName, Namespace: OpenshiftIngressNamespace}, + routerCASecret, + ) if err != nil { return "", err } @@ -525,7 +571,11 @@ func GetAlertmanagerRouterCA(client client.Client) (string, error) { // GetAlertmanagerCA is used to get the CA of Alertmanager func GetAlertmanagerCA(client client.Client) (string, error) { amCAConfigmap := &corev1.ConfigMap{} - err := client.Get(context.TODO(), types.NamespacedName{Name: AlertmanagersDefaultCaBundleName, Namespace: GetDefaultNamespace()}, amCAConfigmap) + err := client.Get( + context.TODO(), + types.NamespacedName{Name: AlertmanagersDefaultCaBundleName, Namespace: GetDefaultNamespace()}, + amCAConfigmap, + ) if err != nil { return "", err } @@ -662,7 +712,12 @@ func SetCertDuration(annotations map[string]string) { if annotations != nil && annotations[AnnotationCertDuration] != "" { d, err := time.ParseDuration(annotations[AnnotationCertDuration]) if err != nil { - log.Error(err, "Failed to parse cert duration, use default one", "annotation", annotations[AnnotationCertDuration]) + log.Error( + err, + "Failed to parse cert duration, use default one", + "annotation", + annotations[AnnotationCertDuration], + ) } else { certDuration = d return diff --git a/operators/multiclusterobservability/pkg/config/obj_storage_conf.go b/operators/multiclusterobservability/pkg/config/obj_storage_conf.go index 3fec9c9a2..11ba78543 100644 --- a/operators/multiclusterobservability/pkg/config/obj_storage_conf.go +++ b/operators/multiclusterobservability/pkg/config/obj_storage_conf.go @@ -5,20 +5,22 @@ package config import ( "errors" + "net/http" "strings" + "github.com/prometheus/common/model" "gopkg.in/yaml.v2" ) // Config is for s3/azure/gcs compatible configuration type Config struct { // s3 configuration - Bucket string `yaml:"bucket"` - Endpoint string `yaml:"endpoint"` - Insecure bool `yaml:"insecure"` - AccessKey string `yaml:"access_key"` - SecretKey string `yaml:"secret_key"` - + Bucket string `yaml:"bucket"` + Endpoint string `yaml:"endpoint"` + Insecure bool `yaml:"insecure"` + AccessKey string `yaml:"access_key"` + SecretKey string `yaml:"secret_key"` + HTTPConfig HTTPConfig `yaml:"http_config"` // azure configuration // Bucket string `yaml:"bucket"` StorageAccount string `yaml:"storage_account"` @@ -31,6 +33,38 @@ type Config struct { ServiceAccount string `yaml:"service_account"` } +// HTTPConfig stores the http.Transport configuration for the s3 minio client.
+type HTTPConfig struct { + IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"` + ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + + TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"` + ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"` + MaxIdleConns int `yaml:"max_idle_conns"` + MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` + MaxConnsPerHost int `yaml:"max_conns_per_host"` + + // Allow upstream callers to inject a round tripper + Transport http.RoundTripper `yaml:"-"` + + TLSConfig TLSConfig `yaml:"tls_config"` +} + +// TLSConfig configures the options for TLS connections. +type TLSConfig struct { + // The CA cert to use for the targets. + CAFile string `yaml:"ca_file"` + // The client cert file for the targets. + CertFile string `yaml:"cert_file"` + // The client key file for the targets. + KeyFile string `yaml:"key_file"` + // Used to verify the hostname for the targets. + ServerName string `yaml:"server_name"` + // Disable target certificate validation. + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` +} + // CheckObjStorageConf is used to check/valid the object storage configurations func CheckObjStorageConf(data []byte) (bool, error) { var objectConfg ObjectStorgeConf diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go b/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go index 716f3a030..1c3d57f5e 100644 --- a/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go +++ b/operators/multiclusterobservability/pkg/rendering/renderer_alertmanager.go @@ -54,7 +54,9 @@ func (r *MCORenderer) renderAlertManagerStatefulSet(res *resource.Resource, dep.Spec.Replicas = mcoconfig.GetReplicas(mcoconfig.Alertmanager, r.cr.Spec.AdvancedConfig) spec := &dep.Spec.Template.Spec - spec.Containers[0].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + + imagePullPolicy := mcoconfig.GetImagePullPolicy(r.cr.Spec) + spec.Containers[0].ImagePullPolicy = imagePullPolicy args := spec.Containers[0].Args if *dep.Spec.Replicas > 1 { @@ -69,7 +71,7 @@ func (r *MCORenderer) renderAlertManagerStatefulSet(res *resource.Resource, spec.Containers[0].Args = args spec.Containers[0].Resources = mcoconfig.GetResources(mcoconfig.Alertmanager, r.cr.Spec.AdvancedConfig) - spec.Containers[1].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + spec.Containers[1].ImagePullPolicy = imagePullPolicy spec.NodeSelector = r.cr.Spec.NodeSelector spec.Tolerations = r.cr.Spec.Tolerations spec.ImagePullSecrets = []corev1.LocalObjectReference{ @@ -99,6 +101,7 @@ func (r *MCORenderer) renderAlertManagerStatefulSet(res *resource.Resource, if found { spec.Containers[2].Image = image } + spec.Containers[2].ImagePullPolicy = imagePullPolicy //replace the volumeClaimTemplate dep.Spec.VolumeClaimTemplates[0].Spec.StorageClassName = &r.cr.Spec.StorageConfig.StorageClass dep.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go b/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go index f42b78c1e..4623e8328 100644 --- a/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go +++ b/operators/multiclusterobservability/pkg/rendering/renderer_grafana.go @@ -47,6 +47,7 @@ func (r *MCORenderer) renderGrafanaDeployments(res *resource.Resource, dep.Spec.Replicas = 
config.GetReplicas(config.Grafana, r.cr.Spec.AdvancedConfig) spec := &dep.Spec.Template.Spec + imagePullPolicy := config.GetImagePullPolicy(r.cr.Spec) spec.Containers[0].Image = config.DefaultImgRepository + "/" + config.GrafanaImgKey + ":" + config.DefaultImgTagSuffix @@ -54,6 +55,7 @@ func (r *MCORenderer) renderGrafanaDeployments(res *resource.Resource, if found { spec.Containers[0].Image = image } + spec.Containers[0].ImagePullPolicy = imagePullPolicy spec.Containers[0].Resources = config.GetResources(config.Grafana, r.cr.Spec.AdvancedConfig) spec.Containers[1].Image = config.DefaultImgRepository + "/" + config.GrafanaDashboardLoaderName + @@ -63,6 +65,7 @@ func (r *MCORenderer) renderGrafanaDeployments(res *resource.Resource, if found { spec.Containers[1].Image = image } + spec.Containers[1].ImagePullPolicy = imagePullPolicy unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) if err != nil { diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go b/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go index 2860925f7..91b6ec71b 100644 --- a/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go +++ b/operators/multiclusterobservability/pkg/rendering/renderer_proxy.go @@ -55,16 +55,22 @@ func (r *MCORenderer) renderProxyDeployment(res *resource.Resource, dep.Spec.Replicas = config.GetReplicas(config.RBACQueryProxy, r.cr.Spec.AdvancedConfig) spec := &dep.Spec.Template.Spec - spec.Containers[0].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + imagePullPolicy := config.GetImagePullPolicy(r.cr.Spec) + spec.Containers[0].ImagePullPolicy = imagePullPolicy args0 := spec.Containers[0].Args for idx := range args0 { args0[idx] = strings.Replace(args0[idx], "{{MCO_NAMESPACE}}", mcoconfig.GetDefaultNamespace(), 1) - args0[idx] = strings.Replace(args0[idx], "{{OBSERVATORIUM_NAME}}", mcoconfig.GetOperandName(mcoconfig.Observatorium), 1) + args0[idx] = strings.Replace( + args0[idx], + "{{OBSERVATORIUM_NAME}}", + mcoconfig.GetOperandName(mcoconfig.Observatorium), + 1, + ) } spec.Containers[0].Args = args0 spec.Containers[0].Resources = mcoconfig.GetResources(mcoconfig.RBACQueryProxy, r.cr.Spec.AdvancedConfig) - spec.Containers[1].ImagePullPolicy = mcoconfig.GetImagePullPolicy(r.cr.Spec) + spec.Containers[1].ImagePullPolicy = imagePullPolicy args1 := spec.Containers[1].Args for idx := range args1 { args1[idx] = strings.Replace(args1[idx], "{{MCO_NAMESPACE}}", mcoconfig.GetDefaultNamespace(), 1) diff --git a/operators/multiclusterobservability/pkg/rendering/renderer_test.go b/operators/multiclusterobservability/pkg/rendering/renderer_test.go index efcc6d01e..731a906ec 100644 --- a/operators/multiclusterobservability/pkg/rendering/renderer_test.go +++ b/operators/multiclusterobservability/pkg/rendering/renderer_test.go @@ -29,7 +29,7 @@ func TestRender(t *testing.T) { TypeMeta: metav1.TypeMeta{Kind: "MultiClusterObservability"}, ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test"}, Spec: mcov1beta2.MultiClusterObservabilitySpec{ - ImagePullPolicy: "Always", + ImagePullPolicy: "IfNotPresent", ImagePullSecret: "test", StorageConfig: &mcov1beta2.StorageConfig{ MetricObjectStorage: &mcoshared.PreConfiguredStorage{ diff --git a/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go b/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go index 45c82f11a..a9663f3ee 100644 --- a/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go +++ 
b/operators/multiclusterobservability/pkg/servicemonitor/sm_controller.go @@ -43,8 +43,12 @@ func Start() { log.Error(err, "Failed to create prom client") os.Exit(1) } - watchlist := cache.NewListWatchFromClient(promClient.MonitoringV1().RESTClient(), "servicemonitors", config.GetDefaultNamespace(), - fields.Everything()) + watchlist := cache.NewListWatchFromClient( + promClient.MonitoringV1().RESTClient(), + "servicemonitors", + config.GetDefaultNamespace(), + fields.Everything(), + ) _, controller := cache.NewInformer( watchlist, &promv1.ServiceMonitor{}, @@ -73,7 +77,9 @@ func onDelete(promClient promclientset.Interface) func(obj interface{}) { return func(obj interface{}) { sm := obj.(*promv1.ServiceMonitor) if sm.ObjectMeta.OwnerReferences != nil && sm.ObjectMeta.OwnerReferences[0].Kind == "Observatorium" { - err := promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Delete(context.TODO(), sm.Name, metav1.DeleteOptions{}) + err := promClient.MonitoringV1(). + ServiceMonitors(ocpMonitoringNamespace). + Delete(context.TODO(), sm.Name, metav1.DeleteOptions{}) if err != nil { log.Error(err, "Failed to delete ServiceMonitor", "namespace", ocpMonitoringNamespace, "name", sm.Name) } else { @@ -95,7 +101,9 @@ func onUpdate(promClient promclientset.Interface) func(newObj interface{}, oldOb } func updateServiceMonitor(promClient promclientset.Interface, sm *promv1.ServiceMonitor) { - found, err := promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Get(context.TODO(), sm.Name, metav1.GetOptions{}) + found, err := promClient.MonitoringV1(). + ServiceMonitors(ocpMonitoringNamespace). + Get(context.TODO(), sm.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { _, err := promClient.MonitoringV1().ServiceMonitors(ocpMonitoringNamespace).Create(context.TODO(), diff --git a/operators/multiclusterobservability/pkg/util/backuputil.go b/operators/multiclusterobservability/pkg/util/backuputil.go new file mode 100644 index 000000000..56efae815 --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/backuputil.go @@ -0,0 +1,80 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package util + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config" +) + +func AddBackupLabelToConfigMap(c client.Client, name, namespace string) error { + m := &corev1.ConfigMap{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: namespace, + }, m) + + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ log.Error(err, "ConfigMap not found", "ConfigMap", name) + return nil + } else { + return err + } + } + + if _, ok := m.ObjectMeta.Labels[config.BackupLabelName]; !ok { + if m.ObjectMeta.Labels == nil { + m.ObjectMeta.Labels = make(map[string]string) + } + m.ObjectMeta.Labels[config.BackupLabelName] = config.BackupLabelValue + err := c.Update(context.TODO(), m) + if err != nil { + return err + } else { + log.Info("Add backup label for configMap", "name", name) + } + + } + return nil +} + +func AddBackupLabelToSecret(c client.Client, name, namespace string) error { + s := &corev1.Secret{} + err := c.Get(context.TODO(), types.NamespacedName{ + Name: name, + Namespace: namespace, + }, s) + + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + log.Error(err, "Secret not found", "Secret", name) + return nil + } else { + return err + } + } + + if _, ok := s.ObjectMeta.Labels[config.BackupLabelName]; !ok { + if s.ObjectMeta.Labels == nil { + s.ObjectMeta.Labels = make(map[string]string) + } + s.ObjectMeta.Labels[config.BackupLabelName] = config.BackupLabelValue + err := c.Update(context.TODO(), s) + if err != nil { + return err + } else { + log.Info("Add backup label for secret", "name", name) + } + } + return nil +} diff --git a/operators/multiclusterobservability/pkg/util/client.go b/operators/multiclusterobservability/pkg/util/client.go index de9c5b7b9..21ff662e0 100644 --- a/operators/multiclusterobservability/pkg/util/client.go +++ b/operators/multiclusterobservability/pkg/util/client.go @@ -109,19 +109,30 @@ func CheckCRDExist(crdClient crdClientSet.Interface, crdName string) (bool, erro } func UpdateCRDWebhookNS(crdClient crdClientSet.Interface, namespace, crdName string) error { - crdObj, err := crdClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crdName, metav1.GetOptions{}) + crdObj, err := crdClient.ApiextensionsV1(). + CustomResourceDefinitions(). + Get(context.TODO(), crdName, metav1.GetOptions{}) if err != nil { log.Error(err, "failed to get CRD", "CRD", crdName) return err } - if crdObj.Spec.Conversion == nil || crdObj.Spec.Conversion.Webhook == nil || crdObj.Spec.Conversion.Webhook.ClientConfig == nil { + if crdObj.Spec.Conversion == nil || crdObj.Spec.Conversion.Webhook == nil || + crdObj.Spec.Conversion.Webhook.ClientConfig == nil { log.Error(err, "empty Conversion in the CRD", "CRD", crdName) return fmt.Errorf("empty Conversion in the CRD %s", crdName) } if crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace != namespace { - log.Info("updating the webhook service namespace", "original namespace", crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace, "new namespace", namespace) + log.Info( + "updating the webhook service namespace", + "original namespace", + crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace, + "new namespace", + namespace, + ) crdObj.Spec.Conversion.Webhook.ClientConfig.Service.Namespace = namespace - _, err := crdClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crdObj, metav1.UpdateOptions{}) + _, err := crdClient.ApiextensionsV1(). + CustomResourceDefinitions(). 
+ Update(context.TODO(), crdObj, metav1.UpdateOptions{}) if err != nil { log.Error(err, "failed to update webhook service namespace") return err diff --git a/operators/multiclusterobservability/pkg/util/managedclusteraddon.go b/operators/multiclusterobservability/pkg/util/managedclusteraddon.go index 4b59fa2c6..631d98274 100644 --- a/operators/multiclusterobservability/pkg/util/managedclusteraddon.go +++ b/operators/multiclusterobservability/pkg/util/managedclusteraddon.go @@ -54,7 +54,14 @@ func CreateManagedClusterAddonCR(c client.Client, namespace, labelKey, labelValu ); err != nil && errors.IsNotFound(err) { // create new managedClusterAddon if err := c.Create(context.TODO(), newManagedClusterAddon); err != nil { - log.Error(err, "failed to create managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + log.Error( + err, + "failed to create managedclusteraddon", + "name", + ManagedClusterAddonName, + "namespace", + namespace, + ) return err } @@ -72,7 +79,14 @@ func CreateManagedClusterAddonCR(c client.Client, namespace, labelKey, labelValu } return false, err }); errPoll != nil { - log.Error(errPoll, "failed to get the created managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + log.Error( + errPoll, + "failed to get the created managedclusteraddon", + "name", + ManagedClusterAddonName, + "namespace", + namespace, + ) return errPoll } @@ -106,7 +120,14 @@ func CreateManagedClusterAddonCR(c client.Client, namespace, labelKey, labelValu } // update status for the created managedclusteraddon if err := c.Status().Update(context.TODO(), managedClusterAddon); err != nil { - log.Error(err, "failed to update status for managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + log.Error( + err, + "failed to update status for managedclusteraddon", + "name", + ManagedClusterAddonName, + "namespace", + namespace, + ) return err } return nil @@ -117,16 +138,35 @@ func CreateManagedClusterAddonCR(c client.Client, namespace, labelKey, labelValu // managedclusteraddon already exists, updating... 
if !reflect.DeepEqual(managedClusterAddon.Spec, newManagedClusterAddon.Spec) { - log.Info("found difference, updating managedClusterAddon", "name", ManagedClusterAddonName, "namespace", namespace) + log.Info( + "found difference, updating managedClusterAddon", + "name", + ManagedClusterAddonName, + "namespace", + namespace, + ) newManagedClusterAddon.ObjectMeta.ResourceVersion = managedClusterAddon.ObjectMeta.ResourceVersion err := c.Update(context.TODO(), newManagedClusterAddon) if err != nil { - log.Error(err, "failed to update managedclusteraddon", "name", ManagedClusterAddonName, "namespace", namespace) + log.Error( + err, + "failed to update managedclusteraddon", + "name", + ManagedClusterAddonName, + "namespace", + namespace, + ) return err } return nil } - log.Info("ManagedClusterAddOn is created or updated successfully", "name", ManagedClusterAddonName, "namespace", namespace) + log.Info( + "ManagedClusterAddOn is created or updated successfully", + "name", + ManagedClusterAddonName, + "namespace", + namespace, + ) return nil } diff --git a/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go b/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go new file mode 100644 index 000000000..3bde7c614 --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/remotewriteendpoint.go @@ -0,0 +1,205 @@ +package util + +import ( + "path" + + "github.com/prometheus/common/config" +) + +const MountPath = "/var/run/secrets/" + +type TLSConfigWithSecret struct { + // Name of the secret which contains the file + SecretName string `yaml:"secret_name,omitempty" json:"secret_name,omitempty"` + // The CA cert to use for the targets. + CAFileKey string `yaml:"ca_file_key,omitempty" json:"ca_file_key,omitempty"` + // The client cert file for the targets. + CertFileKey string `yaml:"cert_file_key,omitempty" json:"cert_file_key,omitempty"` + // The client key file for the targets. + KeyFileKey string `yaml:"key_file_key,omitempty" json:"key_file_key,omitempty"` + // Used to verify the hostname for the targets. + ServerName string `yaml:"server_name,omitempty" json:"server_name,omitempty"` + // Disable target certificate validation. + InsecureSkipVerify bool `yaml:"insecure_skip_verify" json:"insecure_skip_verify"` +} + +type OAuth2WithSecret struct { + ClientID string `yaml:"client_id" json:"client_id"` + ClientSecret config.Secret `yaml:"client_secret" json:"client_secret"` + // Name of the secret which contains the file + SecretName string `yaml:"secret_name,omitempty" json:"secret_name,omitempty"` + ClientSecretFileKey string `yaml:"client_secret_file_key" json:"client_secret_file_key"` + Scopes []string `yaml:"scopes,omitempty" json:"scopes,omitempty"` + TokenURL string `yaml:"token_url" json:"token_url"` + EndpointParams map[string]string `yaml:"endpoint_params,omitempty" json:"endpoint_params,omitempty"` + + // TLSConfig is used to connect to the token URL. 
+ TLSConfig TLSConfigWithSecret `yaml:"tls_config,omitempty"` +} + +type BasicAuthWithSecret struct { + Username string `yaml:"username" json:"username"` + Password config.Secret `yaml:"password,omitempty" json:"password,omitempty"` + // Name of the secret which contains the file + SecretName string `yaml:"secret_name,omitempty" json:"secret_name,omitempty"` + PasswordFileKey string `yaml:"password_file_key,omitempty" json:"password_file_key,omitempty"` +} + +type AuthorizationWithSecret struct { + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Credentials config.Secret `yaml:"credentials,omitempty" json:"credentials,omitempty"` + // Name of the secret which contains the file + SecretName string `yaml:"secret_name,omitempty" json:"secret_name,omitempty"` + CredentialsFileKey string `yaml:"credentials_file_key,omitempty" json:"credentials_file_key,omitempty"` +} + +type HTTPClientConfigWithSecret struct { + // The HTTP basic authentication credentials for the targets. + BasicAuth *BasicAuthWithSecret `yaml:"basic_auth,omitempty" json:"basic_auth,omitempty"` + // The HTTP authorization credentials for the targets. + Authorization *AuthorizationWithSecret `yaml:"authorization,omitempty" json:"authorization,omitempty"` + // The OAuth2 client credentials used to fetch a token for the targets. + OAuth2 *OAuth2WithSecret `yaml:"oauth2,omitempty" json:"oauth2,omitempty"` + // The bearer token for the targets. Deprecated in favour of + // Authorization.Credentials. + BearerToken config.Secret `yaml:"bearer_token,omitempty" json:"bearer_token,omitempty"` + // Name of the secret which contains the file + SecretName string `yaml:"secret_name,omitempty" json:"secret_name,omitempty"` + // The bearer token file for the targets. Deprecated in favour of + // Authorization.CredentialsFile. + BearerTokenFileKey string `yaml:"bearer_token_file_key,omitempty" json:"bearer_token_file_key,omitempty"` + // HTTP proxy server to use to connect to the targets. + ProxyURL *config.URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"` + // TLSConfig to use to connect to the targets. + TLSConfig *TLSConfigWithSecret `yaml:"tls_config,omitempty" json:"tls_config,omitempty"` + // FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + // The omitempty flag is not set, because it would be hidden from the + // marshalled configuration when set to false. 
+ FollowRedirects bool `yaml:"follow_redirects" json:"follow_redirects"` +} + +type RemoteWriteEndpointWithSecret struct { + Name string `yaml:"name" json:"name"` + URL config.URL `yaml:"url" json:"url"` + HttpClientConfig *HTTPClientConfigWithSecret `yaml:"http_client_config,omitempty" json:"http_client_config,omitempty"` +} + +type RemoteWriteEndpoint struct { + Name string `yaml:"name" json:"name"` + URL config.URL `yaml:"url" json:"url"` + HttpClientConfig *config.HTTPClientConfig `yaml:"http_client_config,omitempty" json:"http_client_config,omitempty"` +} + +func getMountPath(secretName, key string) string { + return path.Join(MountPath, secretName, key) +} + +func transformBasicAuth(old BasicAuthWithSecret) *config.BasicAuth { + basicAuth := &config.BasicAuth{ + Username: old.Username, + } + if old.Password != "" { + basicAuth.Password = old.Password + } + if old.SecretName != "" { + basicAuth.PasswordFile = getMountPath(old.SecretName, old.PasswordFileKey) + } + return basicAuth +} + +func transformTLSConfig(old TLSConfigWithSecret) config.TLSConfig { + tlsConfig := config.TLSConfig{ + InsecureSkipVerify: old.InsecureSkipVerify, + } + if old.SecretName != "" { + tlsConfig.ServerName = old.ServerName + } + if old.SecretName != "" { + if old.CAFileKey != "" { + tlsConfig.CAFile = getMountPath(old.SecretName, old.CAFileKey) + } + if old.CertFileKey != "" { + tlsConfig.CertFile = getMountPath(old.SecretName, old.CertFileKey) + } + if old.KeyFileKey != "" { + tlsConfig.KeyFile = getMountPath(old.SecretName, old.KeyFileKey) + } + } + return tlsConfig +} + +func transformAuthorization(old AuthorizationWithSecret) *config.Authorization { + auth := &config.Authorization{} + if old.Type != "" { + auth.Type = old.Type + } + if old.Credentials != "" { + auth.Credentials = old.Credentials + } + if old.SecretName != "" { + auth.CredentialsFile = getMountPath(old.SecretName, old.CredentialsFileKey) + } + return auth +} + +func transformOAuth2(old OAuth2WithSecret) *config.OAuth2 { + oauth2 := &config.OAuth2{ + ClientID: old.ClientID, + ClientSecret: old.ClientSecret, + ClientSecretFile: old.ClientSecretFileKey, + TokenURL: old.TokenURL, + } + if old.Scopes != nil { + oauth2.Scopes = old.Scopes + } + if old.EndpointParams != nil { + oauth2.EndpointParams = old.EndpointParams + } + if old.SecretName != "" { + oauth2.ClientSecretFile = getMountPath(old.SecretName, old.ClientSecretFileKey) + } + + return oauth2 +} + +func Transform(oldClientConfig HTTPClientConfigWithSecret) (*config.HTTPClientConfig, []string) { + sNames := []string{} + clientConfig := &config.HTTPClientConfig{ + FollowRedirects: oldClientConfig.FollowRedirects, + } + if oldClientConfig.BearerToken != "" { + clientConfig.BearerToken = oldClientConfig.BearerToken + } + if oldClientConfig.SecretName != "" { + clientConfig.BearerTokenFile = getMountPath(oldClientConfig.SecretName, oldClientConfig.BearerTokenFileKey) + sNames = append(sNames, oldClientConfig.SecretName) + } + if oldClientConfig.ProxyURL != nil { + clientConfig.ProxyURL = *oldClientConfig.ProxyURL + } + if oldClientConfig.BasicAuth != nil { + clientConfig.BasicAuth = transformBasicAuth(*oldClientConfig.BasicAuth) + if oldClientConfig.BasicAuth.SecretName != "" { + sNames = append(sNames, oldClientConfig.BasicAuth.SecretName) + } + } + if oldClientConfig.TLSConfig != nil && oldClientConfig.TLSConfig.SecretName != "" { + clientConfig.TLSConfig = transformTLSConfig(*oldClientConfig.TLSConfig) + if oldClientConfig.TLSConfig.SecretName != "" { + sNames = append(sNames, 
oldClientConfig.TLSConfig.SecretName) + } + } + if oldClientConfig.Authorization != nil { + clientConfig.Authorization = transformAuthorization(*oldClientConfig.Authorization) + if oldClientConfig.Authorization.SecretName != "" { + sNames = append(sNames, oldClientConfig.Authorization.SecretName) + } + } + if oldClientConfig.OAuth2 != nil { + clientConfig.OAuth2 = transformOAuth2(*oldClientConfig.OAuth2) + if oldClientConfig.OAuth2.SecretName != "" { + sNames = append(sNames, oldClientConfig.OAuth2.SecretName) + } + } + return clientConfig, sNames +} diff --git a/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go b/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go new file mode 100644 index 000000000..e49e4760d --- /dev/null +++ b/operators/multiclusterobservability/pkg/util/remotewriteendpoint_test.go @@ -0,0 +1,83 @@ +package util + +import ( + "fmt" + "path" + "testing" +) + +const ( + basicAuthSName = "basic_secret" + basicAuthSKey = "basic_key" + AuthSName = "auth_secret" + AuthSKey = "auth_key" + OAuth2SName = "oauth2_secret" + OAuth2SKey = "oauth2_key" + BearerTokenSName = "bearertoken_secret" + BearerTokenSKey = "bearertoken_key" + TLSSName = "tls_secret" + TLSCAKey = "tls_ca_key" + TLSCertKey = "tls_cert_key" + TLSKeyKey = "tls_key_key" +) + +func TestTransform(t *testing.T) { + config := &HTTPClientConfigWithSecret{ + BasicAuth: &BasicAuthWithSecret{ + Username: "user", + Password: "pwd", + SecretName: basicAuthSName, + PasswordFileKey: basicAuthSKey, + }, + Authorization: &AuthorizationWithSecret{ + SecretName: AuthSName, + CredentialsFileKey: AuthSKey, + }, + OAuth2: &OAuth2WithSecret{ + ClientID: "client_id", + SecretName: OAuth2SName, + ClientSecretFileKey: OAuth2SKey, + }, + SecretName: BearerTokenSName, + BearerTokenFileKey: BearerTokenSKey, + TLSConfig: &TLSConfigWithSecret{ + SecretName: TLSSName, + CAFileKey: TLSCAKey, + CertFileKey: TLSCertKey, + KeyFileKey: TLSKeyKey, + }, + } + newConfig, names := Transform(*config) + + if newConfig.BasicAuth.PasswordFile != fmt.Sprintf(path.Join(MountPath, basicAuthSName, basicAuthSKey)) { + t.Fatalf("Wrong path for BasicAuth.PasswordFile: %s", newConfig.BasicAuth.PasswordFile) + } + + if newConfig.Authorization.CredentialsFile != fmt.Sprintf(path.Join(MountPath, AuthSName, AuthSKey)) { + t.Fatalf("Wrong path for Authorization.CredentialsFile: %s", newConfig.Authorization.CredentialsFile) + } + + if newConfig.OAuth2.ClientSecretFile != fmt.Sprintf(path.Join(MountPath, OAuth2SName, OAuth2SKey)) { + t.Fatalf("Wrong path for OAuth2.ClientSecretFile: %s", newConfig.OAuth2.ClientSecretFile) + } + + if newConfig.BearerTokenFile != fmt.Sprintf(path.Join(MountPath, BearerTokenSName, BearerTokenSKey)) { + t.Fatalf("Wrong path for BearerTokenFile: %s", newConfig.BearerTokenFile) + } + + if newConfig.TLSConfig.CAFile != fmt.Sprintf(path.Join(MountPath, TLSSName, TLSCAKey)) { + t.Fatalf("Wrong path for TLSConfig.CAFile: %s", newConfig.TLSConfig.CAFile) + } + + if newConfig.TLSConfig.CertFile != fmt.Sprintf(path.Join(MountPath, TLSSName, TLSCertKey)) { + t.Fatalf("Wrong path for TLSConfig.CertFile: %s", newConfig.TLSConfig.CertFile) + } + + if newConfig.TLSConfig.KeyFile != fmt.Sprintf(path.Join(MountPath, TLSSName, TLSKeyKey)) { + t.Fatalf("Wrong path for TLSConfig.KeyFile: %s", newConfig.TLSConfig.KeyFile) + } + + if len(names) != 5 { + t.Fatalf("Wrong number of mount secrets: expect 5, get %d", len(names)) + } +} diff --git a/operators/multiclusterobservability/pkg/webhook/webhook_controller.go 
b/operators/multiclusterobservability/pkg/webhook/webhook_controller.go index 4800ac902..cf151a710 100644 --- a/operators/multiclusterobservability/pkg/webhook/webhook_controller.go +++ b/operators/multiclusterobservability/pkg/webhook/webhook_controller.go @@ -25,7 +25,11 @@ type WebhookController struct { } // NewWebhookController create the WebhookController. -func NewWebhookController(client client.Client, mwh *admissionregistrationv1.MutatingWebhookConfiguration, vwh *admissionregistrationv1.ValidatingWebhookConfiguration) *WebhookController { +func NewWebhookController( + client client.Client, + mwh *admissionregistrationv1.MutatingWebhookConfiguration, + vwh *admissionregistrationv1.ValidatingWebhookConfiguration, +) *WebhookController { return &WebhookController{ client: client, mutatingWebhook: mwh, @@ -39,14 +43,23 @@ func NewWebhookController(client client.Client, mwh *admissionregistrationv1.Mut // currently the controller will not watch the change of the webhook configurations. func (wc *WebhookController) Start(ctx context.Context) error { if wc.mutatingWebhook != nil { - log.V(1).Info("creating or updating the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) + log.V(1).Info( + "creating or updating the mutatingwebhookconfiguration", + "mutatingwebhookconfiguration", + wc.mutatingWebhook.GetName()) foundMwhc := &admissionregistrationv1.MutatingWebhookConfiguration{} - if err := wc.client.Get(context.TODO(), types.NamespacedName{Name: wc.mutatingWebhook.GetName()}, foundMwhc); err != nil && apierrors.IsNotFound(err) { + err := wc.client.Get( + context.TODO(), + types.NamespacedName{Name: wc.mutatingWebhook.GetName()}, foundMwhc) + if err != nil && apierrors.IsNotFound(err) { if err := wc.client.Create(context.TODO(), wc.mutatingWebhook); err != nil { - log.V(1).Info("failed to create the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), "error", err) + log.V(1).Info("failed to create the mutatingwebhookconfiguration", + "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), + "error", err) return err } - log.V(1).Info("the mutatingwebhookconfiguration is created", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) + log.V(1).Info("the mutatingwebhookconfiguration is created", + "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName()) } else if err != nil { log.V(1).Info("failed to check the mutatingwebhookconfiguration", "mutatingwebhookconfiguration", wc.mutatingWebhook.GetName(), "error", err) return err @@ -71,14 +84,19 @@ func (wc *WebhookController) Start(ctx context.Context) error { } if wc.validatingWebhook != nil { - log.V(1).Info("creating or updating the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) + log.V(1).Info("creating or updating the validatingwebhookconfiguration", + "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) foundVwhc := &admissionregistrationv1.ValidatingWebhookConfiguration{} - if err := wc.client.Get(context.TODO(), types.NamespacedName{Name: wc.validatingWebhook.GetName()}, foundVwhc); err != nil && apierrors.IsNotFound(err) { + if err := wc.client.Get(context.TODO(), types.NamespacedName{Name: wc.validatingWebhook.GetName()}, foundVwhc); err != nil && + apierrors.IsNotFound(err) { if err := wc.client.Create(context.TODO(), wc.validatingWebhook); err != nil { - log.V(1).Info("failed to create the validatingwebhookconfiguration", "validatingwebhookconfiguration", 
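
The Start routine above follows a get/create-or-update bootstrap for both webhook configurations. Below is a condensed sketch of that flow using the same controller-runtime client API; the update branch is an assumption, since the hunk only shows the create path, and the real controller may reconcile different fields.

```go
// Condensed create-or-update bootstrap for a MutatingWebhookConfiguration.
package webhook

import (
	"context"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func ensureMutatingWebhook(ctx context.Context, c client.Client,
	desired *admissionregistrationv1.MutatingWebhookConfiguration) error {
	found := &admissionregistrationv1.MutatingWebhookConfiguration{}
	err := c.Get(ctx, types.NamespacedName{Name: desired.GetName()}, found)
	switch {
	case apierrors.IsNotFound(err):
		// first start-up: nothing exists yet, so create the desired object
		return c.Create(ctx, desired)
	case err != nil:
		return err
	default:
		// assumed reconcile of the webhook list before updating in place
		found.Webhooks = desired.Webhooks
		return c.Update(ctx, found)
	}
}
```
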
wc.validatingWebhook.GetName(), "error", err) + log.V(1).Info("failed to create the validatingwebhookconfiguration", + "validatingwebhookconfiguration", wc.validatingWebhook.GetName(), + "error", err) return err } - log.V(1).Info("the validatingwebhookconfiguration is created", "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) + log.V(1).Info("the validatingwebhookconfiguration is created", + "validatingwebhookconfiguration", wc.validatingWebhook.GetName()) } else if err != nil { log.V(1).Info("failed to check the validatingwebhookconfiguration", "validatingwebhookconfiguration", wc.validatingWebhook.GetName(), "error", err) return err @@ -105,7 +123,9 @@ func (wc *WebhookController) Start(ctx context.Context) error { // wait for context done signal <-ctx.Done() - // currently kubernetes prevents terminating pod from deleting kubernetes resources(including validatingwebhookconfiguration...), see: https://kubernetes.io/blog/2021/05/14/using-finalizers-to-control-deletion/ + // currently kubernetes prevents terminating pod from deleting kubernetes resources(including + // validatingwebhookconfiguration...), see: + // https://kubernetes.io/blog/2021/05/14/using-finalizers-to-control-deletion/ // that's why the deleting webhook configuration code is commented /* log.V(1).Info("Shutdown signal received, waiting for the webhook cleanup.") diff --git a/operators/pkg/config/config.go b/operators/pkg/config/config.go index 50445fbb3..669aa78fa 100644 --- a/operators/pkg/config/config.go +++ b/operators/pkg/config/config.go @@ -33,28 +33,21 @@ const ( KubeRbacProxyImgName = "kube-rbac-proxy" KubeRbacProxyKey = "kube_rbac_proxy" - ConfigmapReloaderImgName = "origin-configmap-reloader" - ConfigmapReloaderKey = "prometheus-config-reloader" + PrometheusOperatorImgName = "prometheus-operator" + PrometheusOperatorKey = "prometheus_operator" + + PrometheusConfigmapReloaderImgName = "prometheus-config-reloader" + PrometheusConfigmapReloaderKey = "prometheus_config_reloader" ) var ( ImageKeyNameMap = map[string]string{ - PrometheusKey: PrometheusKey, - KubeStateMetricsKey: KubeStateMetricsImgName, - NodeExporterKey: NodeExporterImgName, - KubeRbacProxyKey: KubeRbacProxyImgName, - MetricsCollectorKey: MetricsCollectorImgName, - ConfigmapReloaderKey: ConfigmapReloaderImgName, + PrometheusKey: PrometheusKey, + KubeStateMetricsKey: KubeStateMetricsImgName, + NodeExporterKey: NodeExporterImgName, + KubeRbacProxyKey: KubeRbacProxyImgName, + MetricsCollectorKey: MetricsCollectorImgName, + PrometheusConfigmapReloaderKey: PrometheusConfigmapReloaderImgName, } ) -// HubInfo is the struct that contains the common information about the hub -// cluster, for example the name of managed cluster on the hub, the URL of -// observatorium api gateway, the URL of hub alertmanager and the CA for the -// hub router -type HubInfo struct { - ClusterName string `yaml:"cluster-name"` - ObservatoriumAPIEndpoint string `yaml:"observatorium-api-endpoint"` - AlertmanagerEndpoint string `yaml:"alertmanager-endpoint"` - AlertmanagerRouterCA string `yaml:"alertmanager-router-ca"` -} diff --git a/operators/pkg/config/types.go b/operators/pkg/config/types.go new file mode 100644 index 000000000..6e20b4692 --- /dev/null +++ b/operators/pkg/config/types.go @@ -0,0 +1,53 @@ +// Copyright (c) 2022 Red Hat, Inc. 
+// Copyright Contributors to the Open Cluster Management project + +package config + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HubInfo is the struct that contains the common information about the hub +// cluster, for example the name of managed cluster on the hub, the URL of +// observatorium api gateway, the URL of hub alertmanager and the CA for the +// hub router +type HubInfo struct { + ClusterName string `yaml:"cluster-name"` + ObservatoriumAPIEndpoint string `yaml:"observatorium-api-endpoint"` + AlertmanagerEndpoint string `yaml:"alertmanager-endpoint"` + AlertmanagerRouterCA string `yaml:"alertmanager-router-ca"` +} + +type RecordingRule struct { + Record string `yaml:"record"` + Expr string `yaml:"expr"` +} +type CollectRule struct { + Collect string `yaml:"collect"` + Annotations map[string]string `yaml:"annotations"` + Expr string `yaml:"expr"` + For string `yaml:"for"` + NameList []string `yaml:"names"` + MatchList []string `yaml:"matches"` +} + +type CollectRuleSelector struct { + MatchExpression []metav1.LabelSelectorRequirement `yaml:"matchExpressions"` +} + +// CollectRuleGroup structure contains information of a group of collect rules used for +// dnamically collecting metrics. +type CollectRuleGroup struct { + Name string `yaml:"name"` + Annotations map[string]string `yaml:"annotations"` + Selector CollectRuleSelector `yaml:"selector"` + CollectRuleList []CollectRule `yaml:"rules"` +} +type MetricsAllowlist struct { + NameList []string `yaml:"names"` + MatchList []string `yaml:"matches"` + RenameMap map[string]string `yaml:"renames"` + RuleList []RecordingRule `yaml:"rules"` //deprecated + RecordingRuleList []RecordingRule `yaml:"recording_rules"` + CollectRuleGroupList []CollectRuleGroup `yaml:"collect_rules"` +} diff --git a/operators/pkg/deploying/deployer.go b/operators/pkg/deploying/deployer.go index 7dba6bd2a..9172e8914 100644 --- a/operators/pkg/deploying/deployer.go +++ b/operators/pkg/deploying/deployer.go @@ -9,9 +9,11 @@ import ( "fmt" "strings" + prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -36,13 +38,16 @@ type Deployer struct { func NewDeployer(client client.Client) *Deployer { deployer := &Deployer{client: client} deployer.deployerFns = map[string]deployerFn{ - "Deployment": deployer.updateDeployment, - "StatefulSet": deployer.updateStatefulSet, - "Service": deployer.updateService, - "ConfigMap": deployer.updateConfigMap, - "Secret": deployer.updateSecret, - "ClusterRole": deployer.updateClusterRole, - "ClusterRoleBinding": deployer.updateClusterRoleBinding, + "Deployment": deployer.updateDeployment, + "StatefulSet": deployer.updateStatefulSet, + "Service": deployer.updateService, + "ConfigMap": deployer.updateConfigMap, + "Secret": deployer.updateSecret, + "ClusterRole": deployer.updateClusterRole, + "ClusterRoleBinding": deployer.updateClusterRoleBinding, + "CustomResourceDefinition": deployer.updateCRD, + "Prometheus": deployer.updatePrometheus, + "PrometheusRule": deployer.updatePrometheusRule, } return deployer } @@ -51,7 +56,11 @@ func NewDeployer(client client.Client) *Deployer { func (d *Deployer) Deploy(obj *unstructured.Unstructured) error { found := &unstructured.Unstructured{} 
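
The allowlist and collect-rule types above are what the metrics allowlist ConfigMap unmarshals into. Here is a small sketch of that mapping; the struct definitions are trimmed copies of the ones in the patch, the sample entries are illustrative only, and gopkg.in/yaml.v2 is an assumption about which YAML library the operator uses.

```go
// Unmarshalling a trimmed metrics allowlist document.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type RecordingRule struct {
	Record string `yaml:"record"`
	Expr   string `yaml:"expr"`
}

type MetricsAllowlist struct {
	NameList          []string        `yaml:"names"`
	MatchList         []string        `yaml:"matches"`
	RecordingRuleList []RecordingRule `yaml:"recording_rules"`
}

const doc = `
names:
  - node_memory_MemAvailable_bytes
matches:
  - __name__="container_cpu_usage_seconds_total",namespace="kube-system"
recording_rules:
  - record: pod:container_cpu_usage:sum
    expr: sum(rate(container_cpu_usage_seconds_total[5m])) by (pod)
`

func main() {
	allowlist := MetricsAllowlist{}
	if err := yaml.Unmarshal([]byte(doc), &allowlist); err != nil {
		panic(err)
	}
	fmt.Printf("%d names, %d matches, %d recording rules\n",
		len(allowlist.NameList), len(allowlist.MatchList), len(allowlist.RecordingRuleList))
}
```
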
found.SetGroupVersionKind(obj.GroupVersionKind()) - err := d.client.Get(context.TODO(), types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, found) + err := d.client.Get( + context.TODO(), + types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, + found, + ) if err != nil { if errors.IsNotFound(err) { log.Info("Create", "Kind:", obj.GroupVersionKind(), "Name:", obj.GetName()) @@ -243,3 +252,70 @@ func (d *Deployer) updateClusterRoleBinding(desiredObj, runtimeObj *unstructured } return nil } + +func (d *Deployer) updateCRD(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimeCRD := &apiextensionsv1.CustomResourceDefinition{} + err := json.Unmarshal(runtimeJSON, runtimeCRD) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime CRD %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredCRD := &apiextensionsv1.CustomResourceDefinition{} + err = json.Unmarshal(desiredJSON, desiredCRD) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal CRD %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredCRD.Spec, runtimeCRD.Spec) { + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + return d.client.Update(context.TODO(), desiredCRD) + } + + return nil +} + +func (d *Deployer) updatePrometheus(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimePrometheus := &prometheusv1.Prometheus{} + err := json.Unmarshal(runtimeJSON, runtimePrometheus) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime Prometheus %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredPrometheus := &prometheusv1.Prometheus{} + err = json.Unmarshal(desiredJSON, desiredPrometheus) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal Prometheus %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredPrometheus.Spec, runtimePrometheus.Spec) { + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + return d.client.Update(context.TODO(), desiredPrometheus) + } + return nil +} + +func (d *Deployer) updatePrometheusRule(desiredObj, runtimeObj *unstructured.Unstructured) error { + runtimeJSON, _ := runtimeObj.MarshalJSON() + runtimePrometheusRule := &prometheusv1.PrometheusRule{} + err := json.Unmarshal(runtimeJSON, runtimePrometheusRule) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal runtime PrometheusRule %s", runtimeObj.GetName())) + } + + desiredJSON, _ := desiredObj.MarshalJSON() + desiredPrometheusRule := &prometheusv1.PrometheusRule{} + err = json.Unmarshal(desiredJSON, desiredPrometheusRule) + if err != nil { + log.Error(err, fmt.Sprintf("Failed to Unmarshal PrometheusRule %s", runtimeObj.GetName())) + } + + if !apiequality.Semantic.DeepDerivative(desiredPrometheusRule.Spec, runtimePrometheusRule.Spec) { + log.Info("Update", "Kind:", runtimeObj.GroupVersionKind(), "Name:", runtimeObj.GetName()) + return d.client.Update(context.TODO(), desiredPrometheusRule) + } + return nil +} diff --git a/operators/pkg/deploying/deployer_test.go b/operators/pkg/deploying/deployer_test.go index 1f975b5ae..415d4d46e 100644 --- a/operators/pkg/deploying/deployer_test.go +++ b/operators/pkg/deploying/deployer_test.go @@ -273,7 +273,7 @@ func TestDeploy(t *testing.T) { }, Data: map[string][]byte{ 
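
All three update helpers above gate the Update call on apiequality.Semantic.DeepDerivative, which treats fields left unset in the desired object as "don't care". A toy example of that semantic, under the assumption that defaulted fields are strings or pointers, as they are in real Kubernetes spec types:

```go
// Demonstrates why DeepDerivative avoids update loops caused by API-server defaults.
package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

type toySpec struct {
	Image              string
	Replicas           *int32
	ServiceAccountName string // imagine the API server defaults this
}

func main() {
	three := int32(3)
	desired := toySpec{Image: "quay.io/example/thanos:v0.23", Replicas: &three}
	current := toySpec{Image: "quay.io/example/thanos:v0.23", Replicas: &three, ServiceAccountName: "default"}

	// true: every field that is set in desired matches current, so no Update is issued.
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, current))

	desired.Image = "quay.io/example/thanos:v0.24"
	// false: a field that is set in desired drifted, so the deployer would call Update.
	fmt.Println(apiequality.Semantic.DeepDerivative(desired, current))
}
```
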
"username": []byte("YWRtaW4="), - "password": []byte(""), + "password": []byte("MWYyZDFlMmU2N2Rm"), }, }, validateResults: func(client client.Client) { diff --git a/operators/pkg/rendering/patching/patcher_test.go b/operators/pkg/rendering/patching/patcher_test.go index b23a441d0..b8e3bbef4 100644 --- a/operators/pkg/rendering/patching/patcher_test.go +++ b/operators/pkg/rendering/patching/patcher_test.go @@ -59,7 +59,7 @@ func TestApplyGlobalPatches(t *testing.T) { Annotations: map[string]string{"mco-imageRepository": "quay.io/stolostron"}, }, Spec: mcov1beta2.MultiClusterObservabilitySpec{ - ImagePullPolicy: "Always", + ImagePullPolicy: "IfNotPresent", ImagePullSecret: "test", }, } diff --git a/operators/pkg/rendering/renderer.go b/operators/pkg/rendering/renderer.go index 8421734e4..ac924475f 100644 --- a/operators/pkg/rendering/renderer.go +++ b/operators/pkg/rendering/renderer.go @@ -25,24 +25,31 @@ type Renderer struct { func NewRenderer() *Renderer { renderer := &Renderer{} renderer.renderFns = map[string]RenderFn{ - "Deployment": renderer.RenderDeployments, - "StatefulSet": renderer.RenderNamespace, - "DaemonSet": renderer.RenderNamespace, - "Service": renderer.RenderNamespace, - "ServiceAccount": renderer.RenderNamespace, - "ConfigMap": renderer.RenderNamespace, - "ClusterRole": renderer.RenderClusterRole, - "ClusterRoleBinding": renderer.RenderClusterRoleBinding, - "Secret": renderer.RenderNamespace, - "Role": renderer.RenderNamespace, - "RoleBinding": renderer.RenderNamespace, - "Ingress": renderer.RenderNamespace, - "PersistentVolumeClaim": renderer.RenderNamespace, + "Deployment": renderer.RenderDeployments, + "StatefulSet": renderer.RenderNamespace, + "DaemonSet": renderer.RenderNamespace, + "Service": renderer.RenderNamespace, + "ServiceAccount": renderer.RenderNamespace, + "ConfigMap": renderer.RenderNamespace, + "ClusterRole": renderer.RenderClusterRole, + "ClusterRoleBinding": renderer.RenderClusterRoleBinding, + "Secret": renderer.RenderNamespace, + "Role": renderer.RenderNamespace, + "RoleBinding": renderer.RenderNamespace, + "Ingress": renderer.RenderNamespace, + "PersistentVolumeClaim": renderer.RenderNamespace, + "Prometheus": renderer.RenderNamespace, + "PrometheusRule": renderer.RenderNamespace, + "CustomResourceDefinition": renderer.RenderNamespace, } return renderer } -func (r *Renderer) RenderTemplates(templates []*resource.Resource, namespace string, labels map[string]string) ([]*unstructured.Unstructured, error) { +func (r *Renderer) RenderTemplates( + templates []*resource.Resource, + namespace string, + labels map[string]string, +) ([]*unstructured.Unstructured, error) { uobjs := []*unstructured.Unstructured{} for _, template := range templates { render, ok := r.renderFns[template.GetKind()] @@ -64,7 +71,11 @@ func (r *Renderer) RenderTemplates(templates []*resource.Resource, namespace str return uobjs, nil } -func (r *Renderer) RenderDeployments(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { +func (r *Renderer) RenderDeployments( + res *resource.Resource, + namespace string, + labels map[string]string, +) (*unstructured.Unstructured, error) { /* err := patching.ApplyGlobalPatches(res, r.cr) if err != nil { return nil, err @@ -75,7 +86,11 @@ func (r *Renderer) RenderDeployments(res *resource.Resource, namespace string, l return u, nil } -func (r *Renderer) RenderNamespace(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { +func (r *Renderer) 
RenderNamespace( + res *resource.Resource, + namespace string, + labels map[string]string, +) (*unstructured.Unstructured, error) { u := &unstructured.Unstructured{Object: res.Map()} if UpdateNamespace(u) { res.SetNamespace(namespace) @@ -84,7 +99,11 @@ func (r *Renderer) RenderNamespace(res *resource.Resource, namespace string, lab return u, nil } -func (r *Renderer) RenderClusterRole(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { +func (r *Renderer) RenderClusterRole( + res *resource.Resource, + namespace string, + labels map[string]string, +) (*unstructured.Unstructured, error) { u := &unstructured.Unstructured{Object: res.Map()} cLabels := u.GetLabels() @@ -99,7 +118,11 @@ func (r *Renderer) RenderClusterRole(res *resource.Resource, namespace string, l return u, nil } -func (r *Renderer) RenderClusterRoleBinding(res *resource.Resource, namespace string, labels map[string]string) (*unstructured.Unstructured, error) { +func (r *Renderer) RenderClusterRoleBinding( + res *resource.Resource, + namespace string, + labels map[string]string, +) (*unstructured.Unstructured, error) { u := &unstructured.Unstructured{Object: res.Map()} cLabels := u.GetLabels() diff --git a/operators/pkg/util/obj_compare.go b/operators/pkg/util/obj_compare.go index 4ac3c79ce..367169a5b 100644 --- a/operators/pkg/util/obj_compare.go +++ b/operators/pkg/util/obj_compare.go @@ -7,6 +7,7 @@ import ( "reflect" "strings" + prometheusv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" v1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -52,6 +53,7 @@ func GetK8sObj(kind string) runtime.Object { "Service": &corev1.Service{}, "CustomResourceDefinition": &apiextensionsv1.CustomResourceDefinition{}, "ObservabilityAddon": &mcov1beta1.ObservabilityAddon{}, + "Prometheus": &prometheusv1.Prometheus{}, } return objs[kind] } diff --git a/proxy/pkg/util/util.go b/proxy/pkg/util/util.go index 00401daca..45217e1db 100644 --- a/proxy/pkg/util/util.go +++ b/proxy/pkg/util/util.go @@ -44,7 +44,7 @@ func InitAllManagedClusterNames() { } // ModifyMetricsQueryParams will modify request url params for query metrics -func ModifyMetricsQueryParams(req *http.Request, url string) { +func ModifyMetricsQueryParams(req *http.Request, reqUrl string) { userName := req.Header.Get("X-Forwarded-User") klog.V(1).Infof("user is %v", userName) klog.V(1).Infof("URL is: %s", req.URL) @@ -58,7 +58,7 @@ func ModifyMetricsQueryParams(req *http.Request, url string) { projectList, ok := GetUserProjectList(token) klog.V(1).Infof("projectList from local mem cache = %v, ok = %v", projectList, ok) if !ok { - projectList = FetchUserProjectList(token, url) + projectList = FetchUserProjectList(token, reqUrl) up := NewUserProject(userName, token, projectList) UpdateUserProject(up) klog.V(1).Infof("projectList from api server = %v", projectList) @@ -73,28 +73,52 @@ func ModifyMetricsQueryParams(req *http.Request, url string) { clusterList := getUserClusterList(projectList) klog.Infof("user <%v> have access to these clusters: %v", userName, clusterList) - queryValues := req.URL.Query() - if len(queryValues) == 0 { - return - } - queryValues = rewriteQuery(queryValues, clusterList, "query") - queryValues = rewriteQuery(queryValues, clusterList, "match[]") - req.URL.RawQuery = queryValues.Encode() + var rawQuery string + if req.Method == "POST" { + body, _ := ioutil.ReadAll(req.Body) + _ = req.Body.Close() + queryValues, err := url.ParseQuery(string(body)) + 
if err != nil { + klog.Errorf("Failed to parse request body: %v", err) + return + } + if len(queryValues) == 0 { + return + } + queryValues = rewriteQuery(queryValues, clusterList, "query") + queryValues = rewriteQuery(queryValues, clusterList, "match[]") + rawQuery = queryValues.Encode() + req.Body = ioutil.NopCloser(strings.NewReader(rawQuery)) + req.Header.Set("Content-Length", fmt.Sprint(len([]rune(rawQuery)))) + req.ContentLength = int64(len([]rune(rawQuery))) + } else { + queryValues := req.URL.Query() + if len(queryValues) == 0 { + return + } + queryValues = rewriteQuery(queryValues, clusterList, "query") + queryValues = rewriteQuery(queryValues, clusterList, "match[]") + req.URL.RawQuery = queryValues.Encode() + rawQuery = req.URL.RawQuery + } - queryValues = req.URL.Query() klog.V(1).Info("modified URL is:") klog.V(1).Infof("URL is: %s", req.URL) klog.V(1).Infof("URL path is: %v", req.URL.Path) - klog.V(1).Infof("URL RawQuery is: %v", req.URL.RawQuery) + klog.V(1).Infof("URL RawQuery is: %v", rawQuery) return } // WatchManagedCluster will watch and save managedcluster when create/update/delete managedcluster func WatchManagedCluster(clusterClient clusterclientset.Interface) { InitAllManagedClusterNames() - watchlist := cache.NewListWatchFromClient(clusterClient.ClusterV1().RESTClient(), "managedclusters", v1.NamespaceAll, - fields.Everything()) + watchlist := cache.NewListWatchFromClient( + clusterClient.ClusterV1().RESTClient(), + "managedclusters", + v1.NamespaceAll, + fields.Everything(), + ) _, controller := cache.NewInformer( watchlist, &clusterv1.ManagedCluster{}, diff --git a/tests/Dockerfile b/tests/Dockerfile index fd6b4682e..94cd1ce79 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.ci.openshift.org/open-cluster-management/builder:go1.17-linux AS builder +FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder # install oc into build image RUN curl -fksSL https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.6.3/openshift-client-linux-4.6.3.tar.gz | tar -xvz -C /usr/local/ oc diff --git a/tests/format-results.sh b/tests/format-results.sh index 45249b2d3..d750407d4 100755 --- a/tests/format-results.sh +++ b/tests/format-results.sh @@ -11,5 +11,5 @@ if [ -z $1 ]; then exit 1 fi -sed -i "s~BeforeSuite~Observability: [P1][Sev1][Observability] Cannot enable observability service successfully~g" $1 -sed -i "s~AfterSuite~Observability: [P1][Sev1][Observability] Cannot uninstall observability service completely~g" $1 +sed -i "s~BeforeSuite~Observability: [P1][Sev1][observability] Cannot enable observability service successfully~g" $1 +sed -i "s~AfterSuite~Observability: [P1][Sev1][observability] Cannot uninstall observability service completely~g" $1 diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index dbc091299..c982de04c 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -85,12 +85,42 @@ func init() { klog.SetOutput(GinkgoWriter) klog.InitFlags(nil) - flag.StringVar(&kubeadminUser, "kubeadmin-user", "kubeadmin", "Provide the kubeadmin credential for the cluster under test (e.g. -kubeadmin-user=\"xxxxx\").") - flag.StringVar(&kubeadminCredential, "kubeadmin-credential", "", "Provide the kubeadmin credential for the cluster under test (e.g. 
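
The POST branch above has to re-read and replace the request body rather than touching RawQuery. Here is a condensed, self-contained sketch of that round trip with rewriteQuery stubbed out; note that Content-Length is a byte count, so this sketch uses len(raw) directly, which matches the patch's rune count only while the encoded query is pure ASCII.

```go
// Rewriting a Prometheus query submitted in a POST form body.
package main

import (
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
)

// placeholder: the real proxy injects the caller's cluster list into the PromQL selector.
func rewriteQuery(v url.Values, clusters []string, key string) url.Values {
	return v
}

func rewritePOSTBody(req *http.Request, clusters []string) error {
	body, err := io.ReadAll(req.Body)
	if err != nil {
		return err
	}
	_ = req.Body.Close()

	values, err := url.ParseQuery(string(body))
	if err != nil {
		return err
	}
	values = rewriteQuery(values, clusters, "query")
	values = rewriteQuery(values, clusters, "match[]")

	// put the rewritten body back and keep the length fields consistent (bytes, not runes)
	raw := values.Encode()
	req.Body = io.NopCloser(strings.NewReader(raw))
	req.ContentLength = int64(len(raw))
	req.Header.Set("Content-Length", strconv.Itoa(len(raw)))
	return nil
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "https://rbac-query-proxy/api/v1/query",
		strings.NewReader("query=node_memory_MemAvailable_bytes"))
	_ = rewritePOSTBody(req, []string{"local-cluster"})
}
```
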
-kubeadmin-credential=\"xxxxx-xxxxx-xxxxx-xxxxx\").") - flag.StringVar(&baseDomain, "base-domain", "", "Provide the base domain for the cluster under test (e.g. -base-domain=\"demo.red-chesterfield.com\").") - flag.StringVar(&reportFile, "report-file", "results.xml", "Provide the path to where the junit results will be printed.") - flag.StringVar(&kubeconfig, "kubeconfig", "", "Location of the kubeconfig to use; defaults to KUBECONFIG if not set") - flag.StringVar(&optionsFile, "options", "", "Location of an \"options.yaml\" file to provide input for various tests") + flag.StringVar( + &kubeadminUser, + "kubeadmin-user", + "kubeadmin", + "Provide the kubeadmin credential for the cluster under test (e.g. -kubeadmin-user=\"xxxxx\").", + ) + flag.StringVar( + &kubeadminCredential, + "kubeadmin-credential", + "", + "Provide the kubeadmin credential for the cluster under test (e.g. -kubeadmin-credential=\"xxxxx-xxxxx-xxxxx-xxxxx\").", + ) + flag.StringVar( + &baseDomain, + "base-domain", + "", + "Provide the base domain for the cluster under test (e.g. -base-domain=\"demo.red-chesterfield.com\").", + ) + flag.StringVar( + &reportFile, + "report-file", + "results.xml", + "Provide the path to where the junit results will be printed.", + ) + flag.StringVar( + &kubeconfig, + "kubeconfig", + "", + "Location of the kubeconfig to use; defaults to KUBECONFIG if not set", + ) + flag.StringVar( + &optionsFile, + "options", + "", + "Location of an \"options.yaml\" file to provide input for various tests", + ) } func TestObservabilityE2E(t *testing.T) { @@ -185,7 +215,10 @@ func initVars() { baseDomain = testOptions.HubCluster.BaseDomain if testOptions.HubCluster.ClusterServerURL == "" { - testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", testOptions.HubCluster.BaseDomain) + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf( + "https://api.%s:6443", + testOptions.HubCluster.BaseDomain, + ) } } else { Expect(baseDomain).NotTo(BeEmpty(), "The `baseDomain` is required.") diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 00a6d6947..f5b4be885 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -64,59 +64,86 @@ var _ = Describe("", func() { By("Waiting for MCO addon components scales to 0") Eventually(func() error { - err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") - if len(podList.Items) != 0 || err != nil { + err, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) + if err != nil { return fmt.Errorf("Failed to disable observability addon") } - return nil - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) - - Eventually(func() error { - err = utils.CheckAllOBADisabled(testOptions) - if err != nil { - return err + if len(podList.Items) != 0 { + for _, po := range podList.Items { + if po.Status.Phase == "Running" { + return fmt.Errorf("Failed to disable observability addon, there is still metrics-collector pod in Running") + } + } } return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + // according to PR - stolostron/multicluster-observability-operator#886 + // 2.4 - delete obs addon when enableMetrics is set to false + /* + Eventually(func() error { + err = utils.CheckAllOBADisabled(testOptions) + if err != 
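
These flags feed specs that lean heavily on Gomega's Eventually with the suite's EventuallyTimeoutMinute and EventuallyIntervalSecond constants. A minimal standalone sketch of that polling idiom, with illustrative durations rather than the suite's values:

```go
// A function is retried until it stops returning an error or the timeout elapses.
package tests

import (
	"fmt"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestEventuallyIdiom(t *testing.T) {
	g := NewWithT(t)
	start := time.Now()

	g.Eventually(func() error {
		// stand-in for a real readiness check such as listing metrics-collector pods
		if time.Since(start) < 2*time.Second {
			return fmt.Errorf("metrics-collector pod not ready yet")
		}
		return nil
	}, 10*time.Second, 500*time.Millisecond).Should(Succeed())
}
```
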
nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + */ }) - // it takes Prometheus 5m to notice a metric is not available - https://github.com/prometheus/prometheus/issues/1810 + // it takes Prometheus 5m to notice a metric is not available - + // https://github.com/prometheus/prometheus/issues/1810 // the corret way is use timestamp, for example: - // timestamp(node_memory_MemAvailable_bytes{cluster="local-cluster"}) - timestamp(node_memory_MemAvailable_bytes{cluster="local-cluster"} offset 1m) > 59 + // timestamp(node_memory_MemAvailable_bytes{cluster="local-cluster"}) - + // timestamp(node_memory_MemAvailable_bytes{cluster="local-cluster"} offset 1m) > 59 It("[Stable] Waiting for check no metric data in grafana console", func() { Eventually(func() error { for _, cluster := range clusters { - err, hasMetric := utils.ContainManagedClusterMetric(testOptions, `timestamp(node_memory_MemAvailable_bytes{cluster="`+cluster+`}) - timestamp(node_memory_MemAvailable_bytes{cluster=`+cluster+`"} offset 1m) > 59`, []string{`"__name__":"node_memory_MemAvailable_bytes"`}) - if err != nil && !hasMetric && strings.Contains(err.Error(), "Failed to find metric name from response") { + err, hasMetric := utils.ContainManagedClusterMetric( + testOptions, + `timestamp(node_memory_MemAvailable_bytes{cluster="`+cluster+`}) - timestamp(node_memory_MemAvailable_bytes{cluster=`+cluster+`"} offset 1m) > 59`, + []string{`"__name__":"node_memory_MemAvailable_bytes"`}, + ) + if err != nil && !hasMetric && + strings.Contains(err.Error(), "Failed to find metric name from response") { return nil } } return fmt.Errorf("Check no metric data in grafana console error: %v", err) }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) }) - }) It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) - By("Waiting for MCO addon components ready") - Eventually(func() bool { - err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") - if len(podList.Items) == 1 && err == nil { - return true - } - return false - }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) + By("Waiting for MCO addon components ready") + Eventually(func() bool { + err, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) + if len(podList.Items) == 1 && err == nil { + return true + } + return false + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) - By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") - Eventually(func() error { - err = utils.CheckAllOBAsEnabled(testOptions) - if err != nil { - return err - } - return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) + }) }) It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - 
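
To make the timestamp() trick in the comment above concrete, here is the intended freshness query written out with explicit quoting on both selectors (the inline string in the spec drops a quote around the first cluster label value). This is a sketch of the query the comment describes, not a drop-in replacement for the spec code.

```go
// Builds the PromQL freshness check for a managed cluster's metric.
package main

import "fmt"

func freshnessQuery(cluster string) string {
	return fmt.Sprintf(
		`timestamp(node_memory_MemAvailable_bytes{cluster="%s"}) - `+
			`timestamp(node_memory_MemAvailable_bytes{cluster="%s"} offset 1m) > 59`,
		cluster, cluster)
}

func main() {
	// A series that is still being scraped advances its timestamp by ~60s per
	// minute, so the difference stays above 59; a stale series returns no samples.
	fmt.Println(freshnessQuery("local-cluster"))
}
```
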
Should not set interval to values beyond scope [P3][Sev3][Observability][Stable] (addon/g0)", func() { @@ -149,17 +176,14 @@ var _ = Describe("", func() { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) - klog.V(1).Infof("managedcluster number is <%d>", len(testOptions.ManagedClusters)) - if len(testOptions.ManagedClusters) > 0 { - By("Waiting for MCO addon components scales to 0") - Eventually(func() bool { - err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) - if err == nil && obaNS == nil { - return true - } - return false - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) - } + By("Waiting for MCO addon components scales to 0") + Eventually(func() bool { + err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) + if err == nil && obaNS == nil { + return true + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) }) It("[Stable] Remove disable observability label from the managed cluster", func() { @@ -169,14 +193,20 @@ var _ = Describe("", func() { By("Waiting for MCO addon components ready") Eventually(func() bool { - err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + err, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) if len(podList.Items) == 1 && err == nil { return true } return false }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) }) - }) + }, + ) JustAfterEach(func() { Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index f6f94e33f..4a88e4a55 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -53,7 +53,9 @@ var _ = Describe("", func() { It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { - sts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{LabelSelector: label}) + sts, err := hubClient.AppsV1(). + StatefulSets(MCO_NAMESPACE). 
+ List(context.TODO(), metav1.ListOptions{LabelSelector: label}) Expect(err).NotTo(HaveOccurred()) for _, stsInfo := range (*sts).Items { Expect(len(stsInfo.Spec.Template.Spec.Volumes)).Should(BeNumerically(">", 0)) @@ -65,7 +67,9 @@ var _ = Describe("", func() { if strings.Contains(stsInfo.Name, "-thanos-rule") { By("The statefulset: " + stsInfo.Name + " should have the appropriate configmap mounted") - Expect(stsInfo.Spec.Template.Spec.Volumes[0].ConfigMap.Name).To(Equal("thanos-ruler-default-rules")) + Expect( + stsInfo.Spec.Template.Spec.Volumes[0].ConfigMap.Name, + ).To(Equal("thanos-ruler-default-rules")) } } } @@ -141,9 +145,16 @@ var _ = Describe("", func() { stsName := (*rules).Items[0].Name oldSts, _ := utils.GetStatefulSet(testOptions, true, stsName, MCO_NAMESPACE) - yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_valid"}) + yamlB, err := kustomize.Render( + kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_valid"}, + ) Expect(err).NotTo(HaveOccurred()) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB)).NotTo(HaveOccurred()) ThanosRuleRestarting := false By("Wait for thanos rule pods are restarted and ready") @@ -180,19 +191,31 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("[P2][Sev2][Observability][Stable] Should modify the SECRET: alertmanager-config (alert/g0)", func() { + It("[P2][Sev2][observability][Stable] Should modify the SECRET: alertmanager-config (alert/g0)", func() { By("Editing the secret, we should be able to add the third partying tools integrations") secret := utils.CreateCustomAlertConfigYaml(testOptions.HubCluster.BaseDomain) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, secret)).NotTo(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + secret)).NotTo(HaveOccurred()) klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable] (alert/g0)", func() { By("Updating custom alert rules") - yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_invalid"}) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + yamlB, _ := kustomize.Render( + kustomize.Options{KustomizationPath: "../../../examples/alerts/custom_rules_invalid"}, + ) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB)).NotTo(HaveOccurred()) var labelName, labelValue string labels, _ := kustomize.GetLabels(yamlB) @@ -201,11 +224,14 @@ var _ = Describe("", func() { } By("Checking alert generated") - Eventually(func() error { - err, _ := utils.ContainManagedClusterMetric(testOptions, `ALERTS{`+labelName+`="`+labelValue+`"}`, - []string{`"__name__":"ALERTS"`, `"` + labelName + `":"` + labelValue + `"`}) - return err - }, EventuallyTimeoutMinute*5, 
EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) + Eventually( + func() error { + err, _ := utils.ContainManagedClusterMetric(testOptions, `ALERTS{`+labelName+`="`+labelValue+`"}`, + []string{`"__name__":"ALERTS"`, `"` + labelName + `":"` + labelValue + `"`}) + return err + }, + EventuallyTimeoutMinute*5, + EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable] (alert/g0)", func() { @@ -220,7 +246,9 @@ var _ = Describe("", func() { oldSts, _ := utils.GetStatefulSet(testOptions, true, stsName, MCO_NAMESPACE) Eventually(func() error { - err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Delete(context.TODO(), configmap[1], metav1.DeleteOptions{}) + err := hubClient.CoreV1(). + ConfigMaps(MCO_NAMESPACE). + Delete(context.TODO(), configmap[1], metav1.DeleteOptions{}) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) @@ -272,6 +300,10 @@ var _ = Describe("", func() { Expect(err).NotTo(HaveOccurred()) if os.Getenv("IS_KIND_ENV") != "true" { + if BearerToken == "" { + BearerToken, err = utils.FetchBearerToken(testOptions) + Expect(err).NotTo(HaveOccurred()) + } alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) } @@ -281,6 +313,24 @@ var _ = Describe("", func() { Expect(err).NotTo(HaveOccurred()) expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) + // install watchdog PrometheusRule to *KS clusters + watchDogRuleKustomizationPath := "../../../examples/alerts/watchdog_rule" + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: watchDogRuleKustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + for _, ks := range expectedKSClusterNames { + for idx, mc := range testOptions.ManagedClusters { + if mc.Name == ks { + err = utils.Apply( + testOptions.ManagedClusters[idx].ClusterServerURL, + testOptions.ManagedClusters[idx].KubeConfig, + testOptions.ManagedClusters[idx].KubeContext, + yamlB, + ) + Expect(err).NotTo(HaveOccurred()) + } + } + } + By("Checking Watchdog alerts are forwarded to the hub") Eventually(func() error { resp, err := client.Do(alertGetReq) diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 1edddec51..84ec7ef7d 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -35,7 +35,12 @@ var _ = Describe("", func() { hubPodsName := []string{} Eventually(func() bool { if collectorPodName == "" { - _, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + _, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) if podList != nil && len(podList.Items) > 0 { collectorPodName = podList.Items[0].Name } @@ -44,7 +49,12 @@ var _ = Describe("", func() { return false } hubPodsName = []string{} - _, apiPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app.kubernetes.io/name=observatorium-api") + _, apiPodList := utils.GetPodList( + testOptions, + true, + MCO_NAMESPACE, + "app.kubernetes.io/name=observatorium-api", + ) if apiPodList != nil && len(apiPodList.Items) != 0 { for _, pod := range apiPodList.Items { hubPodsName = append(hubPodsName, pod.Name) @@ -70,11 +80,18 @@ var _ = Describe("", func() { By(fmt.Sprintf("Waiting for old pods 
removed: %v and new pods created", hubPodsName)) Eventually(func() bool { - err1, appPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app.kubernetes.io/name=observatorium-api") + err1, appPodList := utils.GetPodList( + testOptions, + true, + MCO_NAMESPACE, + "app.kubernetes.io/name=observatorium-api", + ) err2, rbacPodList := utils.GetPodList(testOptions, true, MCO_NAMESPACE, "app=rbac-query-proxy") if err1 == nil && err2 == nil { if len(hubPodsName) != len(appPodList.Items)+len(rbacPodList.Items) { - klog.V(1).Infof("Wrong number of pods: <%d> observatorium-api pods and <%d> rbac-query-proxy pods", len(appPodList.Items), len(rbacPodList.Items)) + klog.V(1).Infof("Wrong number of pods: <%d> observatorium-api pods and <%d> rbac-query-proxy pods", + len(appPodList.Items), + len(rbacPodList.Items)) return false } for _, oldPodName := range hubPodsName { @@ -122,7 +139,12 @@ var _ = Describe("", func() { By(fmt.Sprintf("Waiting for old pod <%s> removed and new pod created", collectorPodName)) Eventually(func() bool { - err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + err, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) if err == nil { for _, pod := range podList.Items { if pod.Name != collectorPodName { @@ -136,7 +158,12 @@ var _ = Describe("", func() { } // debug code to check label "cert/time-restarted" - deployment, err := utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + deployment, err := utils.GetDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + ) if err == nil { klog.V(1).Infof("labels: <%v>", deployment.Spec.Template.ObjectMeta.Labels) } diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index add2e53e8..18d2ad9f4 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -34,7 +34,8 @@ var _ = Describe("", func() { if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") } - mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) } @@ -47,7 +48,8 @@ var _ = Describe("", func() { if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") } - mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) spec := mcoSC.Object["spec"].(map[string]interface{}) @@ -70,7 +72,9 @@ var _ = Describe("", func() { } Eventually(func() error { - pvcList, err := hubClient.CoreV1().PersistentVolumeClaims(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{}) + pvcList, err := hubClient.CoreV1(). + PersistentVolumeClaims(MCO_NAMESPACE). 
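
The certificate-renewal spec records the hub pod names before rotating certificates and then polls until none of them are still present. A tiny helper capturing that comparison, purely illustrative:

```go
// allReplaced reports whether every previously recorded pod has been replaced.
package main

import "fmt"

func allReplaced(oldNames, currentNames []string) bool {
	current := make(map[string]struct{}, len(currentNames))
	for _, name := range currentNames {
		current[name] = struct{}{}
	}
	for _, name := range oldNames {
		if _, stillRunning := current[name]; stillRunning {
			return false
		}
	}
	return true
}

func main() {
	old := []string{"observatorium-api-abc", "rbac-query-proxy-def"}
	now := []string{"observatorium-api-xyz", "rbac-query-proxy-uvw"}
	fmt.Println(allReplaced(old, now)) // true once every old pod is gone
}
```
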
+ List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -80,7 +84,12 @@ var _ = Describe("", func() { scName := *pvc.Spec.StorageClassName statusPhase := pvc.Status.Phase if scName != expectedSC || statusPhase != "Bound" { - return fmt.Errorf("PVC check failed, scName = %s, expectedSC = %s, statusPhase = %s", scName, expectedSC, statusPhase) + return fmt.Errorf( + "PVC check failed, scName = %s, expectedSC = %s, statusPhase = %s", + scName, + expectedSC, + statusPhase, + ) } } } @@ -145,7 +154,8 @@ var _ = Describe("", func() { It("RHACM4K-2822: Observability: Verify the replica in advanced config for Observability components @BVT - [P1][Sev1][Observability][Integration] (config/g0)", func() { - mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) } @@ -208,19 +218,59 @@ var _ = Describe("", func() { Expect(err).NotTo(HaveOccurred()) for _, deployInfo := range (*deploys).Items { Expect(cpu).To(Equal(deployInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())) - Expect(limits["memory"]).To(Equal(deployInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())) + Expect( + limits["memory"], + ).To(Equal(deployInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())) } } else { sts, err := utils.GetStatefulSetWithLabel(testOptions, true, component.Label, MCO_NAMESPACE) Expect(err).NotTo(HaveOccurred()) for _, stsInfo := range (*sts).Items { Expect(cpu).To(Equal(stsInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().String())) - Expect(limits["memory"]).To(Equal(stsInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String())) + memStr := stsInfo.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String() + Expect(limits["memory"]).To(Equal(memStr)) } } } }) + It("[P2][Sev2][observability][Integration] Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { + + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
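
A condensed version of the PVC assertion above using client-go directly: every claim in the observability namespace should use the expected StorageClass and be Bound. The namespace literal is a hypothetical stand-in for MCO_NAMESPACE.

```go
// Verifies StorageClass and phase for all observability PVCs.
package checks

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func checkPVCStorageClass(ctx context.Context, c kubernetes.Interface, expectedSC string) error {
	pvcs, err := c.CoreV1().PersistentVolumeClaims("open-cluster-management-observability").
		List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, pvc := range pvcs.Items {
		sc := ""
		if pvc.Spec.StorageClassName != nil {
			sc = *pvc.Spec.StorageClassName
		}
		if sc != expectedSC || pvc.Status.Phase != corev1.ClaimBound {
			return fmt.Errorf("pvc %s: storageClass=%s (want %s), phase=%s",
				pvc.Name, sc, expectedSC, pvc.Status.Phase)
		}
	}
	return nil
}
```
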
+ Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if err != nil { + panic(err.Error()) + } + + spec := mcoRes.Object["spec"].(map[string]interface{}) + if _, adv := spec["advanced"]; !adv { + Skip("Skip the case since the MCO CR did not have advanced spec configed") + } + + advancedSpec := mcoRes.Object["spec"].(map[string]interface{})["advanced"].(map[string]interface{}) + + for _, component := range []string{"compact", "store", "query", "receive", "rule"} { + klog.V(1).Infof("The component is: %s\n", component) + annotations := advancedSpec[component].(map[string]interface{})["serviceAccountAnnotations"].(map[string]interface{}) + sas, err := utils.GetSAWithLabel(testOptions, true, + "app.kubernetes.io/name=thanos-"+component, MCO_NAMESPACE) + Expect(err).NotTo(HaveOccurred()) + for _, saInfo := range (*sas).Items { + for key, value := range annotations { + exist := false + for eKey, eValue := range saInfo.Annotations { + if eKey == key && eValue == value.(string) { + exist = true + continue + } + } + Expect(exist).To(BeTrue()) + } + } + + } + }) + JustAfterEach(func() { Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) }) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index faf9ed5d3..773bdf1c2 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -32,8 +32,15 @@ var _ = Describe("", func() { It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { By("Creating custom dashboard configmap") - yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + yamlB, _ := kustomize.Render( + kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, + ) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB)).NotTo(HaveOccurred()) Eventually(func() bool { _, result := utils.ContainDashboard(testOptions, dashboardTitle) return result @@ -42,8 +49,15 @@ var _ = Describe("", func() { It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { By("Updating custom dashboard configmap") - yamlB, _ := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + yamlB, _ := kustomize.Render( + kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, + ) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB)).NotTo(HaveOccurred()) Eventually(func() bool { _, result := utils.ContainDashboard(testOptions, dashboardTitle) return result diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index bba4346c9..e2516d03c 100644 --- 
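
The serviceAccountAnnotations check above walks the unstructured MCO object with chained type assertions, which panic if a component omits the advanced block. A sketch of the same lookup using the Nested helpers, which report absence instead of panicking; the field path follows the spec above, everything else is illustrative.

```go
// Safe extraction of per-component serviceAccountAnnotations from the MCO CR.
package checks

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func serviceAccountAnnotations(mco *unstructured.Unstructured, component string) (map[string]string, bool) {
	annotations, found, err := unstructured.NestedStringMap(
		mco.Object, "spec", "advanced", component, "serviceAccountAnnotations")
	if err != nil || !found {
		return nil, false
	}
	return annotations, true
}
```
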
a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -37,17 +37,32 @@ var _ = Describe("", func() { dep *appv1.Deployment ) Eventually(func() error { - dep, err = utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + dep, err = utils.GetDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + ) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) Eventually(func() error { - err = utils.DeleteDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + err = utils.DeleteDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + ) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) Eventually(func() bool { - newDep, err = utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + newDep, err = utils.GetDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + ) if err == nil { if dep.ObjectMeta.ResourceVersion != newDep.ObjectMeta.ResourceVersion { return true @@ -59,17 +74,33 @@ var _ = Describe("", func() { It("[Stable] Updating metrics-collector deployment", func() { updateSaName := "test-serviceaccount" Eventually(func() error { - newDep, err = utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + newDep, err = utils.GetDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + ) if err != nil { return err } newDep.Spec.Template.Spec.ServiceAccountName = updateSaName - newDep, err = utils.UpdateDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE, newDep) + newDep, err = utils.UpdateDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + newDep, + ) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) Eventually(func() bool { - revertDep, err := utils.GetDeployment(testOptions, false, "metrics-collector-deployment", MCO_ADDON_NAMESPACE) + revertDep, err := utils.GetDeployment( + testOptions, + false, + "metrics-collector-deployment", + MCO_ADDON_NAMESPACE, + ) if err == nil { if revertDep.ObjectMeta.ResourceVersion != newDep.ObjectMeta.ResourceVersion && revertDep.Spec.Template.Spec.ServiceAccountName != updateSaName { @@ -130,16 +161,31 @@ var _ = Describe("", func() { cm *v1.ConfigMap ) Eventually(func() error { - err, cm = utils.GetConfigMap(testOptions, false, "metrics-collector-serving-certs-ca-bundle", MCO_ADDON_NAMESPACE) + err, cm = utils.GetConfigMap( + testOptions, + false, + "metrics-collector-serving-certs-ca-bundle", + MCO_ADDON_NAMESPACE, + ) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) Eventually(func() error { - err = utils.DeleteConfigMap(testOptions, false, "metrics-collector-serving-certs-ca-bundle", MCO_ADDON_NAMESPACE) + err = utils.DeleteConfigMap( + testOptions, + false, + "metrics-collector-serving-certs-ca-bundle", + MCO_ADDON_NAMESPACE, + ) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) newCm := &v1.ConfigMap{} Eventually(func() bool { - err, newCm = utils.GetConfigMap(testOptions, false, "metrics-collector-serving-certs-ca-bundle", MCO_ADDON_NAMESPACE) + err, newCm = utils.GetConfigMap( + testOptions, + false, + "metrics-collector-serving-certs-ca-bundle", + 
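
The endpoint-preservation specs delete a resource and then wait for it to reappear with a different ResourceVersion, which is what demonstrates the endpoint operator recreated it rather than the old object lingering. A tiny predicate for that comparison, purely illustrative (comparing UIDs would be an even stricter recreation check, since updates alone also bump the ResourceVersion):

```go
// recreated reports whether current is a different incarnation of the object old referred to.
package checks

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

func recreated(old, current metav1.Object) bool {
	if old == nil || current == nil {
		return false
	}
	return old.GetResourceVersion() != current.GetResourceVersion()
}
```
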
MCO_ADDON_NAMESPACE, + ) if err == nil { if cm.ObjectMeta.ResourceVersion != newCm.ObjectMeta.ResourceVersion { return true diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index ce20b79a2..bac28b52c 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -33,7 +33,11 @@ var _ = Describe("", func() { } for _, cluster := range clusters { query := fmt.Sprintf("node_memory_MemAvailable_bytes{cluster=\"%s\"}", cluster) - err, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{`"__name__":"node_memory_MemAvailable_bytes"`}) + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"node_memory_MemAvailable_bytes"`}, + ) if err != nil { return err } diff --git a/tests/pkg/tests/observability_install_test.go b/tests/pkg/tests/observability_install_test.go index 238501b28..263cb432e 100644 --- a/tests/pkg/tests/observability_install_test.go +++ b/tests/pkg/tests/observability_install_test.go @@ -51,13 +51,29 @@ func installMCO() { // print mco logs if MCO installation failed defer func(testOptions utils.TestOptions, isHub bool, namespace, podName, containerName string, previous bool, tailLines int64) { if testFailed { - mcoLogs, err := utils.GetPodLogs(testOptions, isHub, namespace, podName, containerName, previous, tailLines) + mcoLogs, err := utils.GetPodLogs( + testOptions, + isHub, + namespace, + podName, + containerName, + previous, + tailLines, + ) Expect(err).NotTo(HaveOccurred()) fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed failed, checking MCO operator logs:\n%s\n", mcoLogs) } else { fmt.Fprintf(GinkgoWriter, "[DEBUG] MCO is installed successfully!\n") } - }(testOptions, false, mcoNs, mcoPod, "multicluster-observability-operator", false, 1000) + }( + testOptions, + false, + mcoNs, + mcoPod, + "multicluster-observability-operator", + false, + 1000, + ) By("Checking Required CRDs are created") Eventually(func() error { @@ -84,7 +100,13 @@ func installMCO() { //set resource quota and limit range for canary environment to avoid destruct the node yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/policy"}) Expect(err).NotTo(HaveOccurred()) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) By("Creating the MCO testing RBAC resources") Expect(utils.CreateMCOTestingRBAC(testOptions)).NotTo(HaveOccurred()) @@ -94,12 +116,19 @@ func installMCO() { v1beta1KustomizationPath := "../../../examples/mco/e2e/v1beta1" yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta1KustomizationPath}) Expect(err).NotTo(HaveOccurred()) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) By("Waiting for MCO ready status") allPodsIsReady := false Eventually(func() error { - instance, err := dynClient.Resource(utils.NewMCOGVRV1BETA1()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + instance, err := dynClient.Resource(utils.NewMCOGVRV1BETA1()). 
+ Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err == nil { allPodsIsReady = utils.StatusContainsTypeEqualTo(instance, "Ready") if allPodsIsReady { @@ -109,7 +138,10 @@ func installMCO() { } testFailed = true if instance != nil && instance.Object != nil { - return fmt.Errorf("MCO componnets cannot be running in 20 minutes. check the MCO CR status for the details: %v", instance.Object["status"]) + return fmt.Errorf( + "MCO componnets cannot be running in 20 minutes. check the MCO CR status for the details: %v", + instance.Object["status"], + ) } else { return fmt.Errorf("Wait for reconciling.") } @@ -117,7 +149,8 @@ func installMCO() { By("Check clustermanagementaddon CR is created") Eventually(func() error { - _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()). + Get(context.TODO(), "observability-controller", metav1.GetOptions{}) if err != nil { testFailed = true return err @@ -132,16 +165,42 @@ func installMCO() { Expect(err).NotTo(HaveOccurred()) } - By("Apply MCO instance of v1beta2") - v1beta2KustomizationPath := "../../../examples/mco/e2e/v1beta2" - yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta2KustomizationPath}) - Expect(err).NotTo(HaveOccurred()) + if os.Getenv("IS_CANARY_ENV") != "true" { + By("Recreating Minio-tls as object storage") + //set resource quota and limit range for canary environment to avoid destruct the node + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/minio-tls"}) + Expect(err).NotTo(HaveOccurred()) + Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) - // add retry for update mco object failure - Eventually(func() error { - return utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB) - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + By("Apply MCO instance of v1beta2") + v1beta2KustomizationPath := "../../../examples/mco/e2e/v1beta2/custom-certs" + yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta2KustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + // add retry for update mco object failure + Eventually(func() error { + return utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + ) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + } else { + By("Apply MCO instance of v1beta2") + v1beta2KustomizationPath := "../../../examples/mco/e2e/v1beta2" + yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: v1beta2KustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + // add retry for update mco object failure + Eventually(func() error { + return utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + ) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + } // wait for pod restarting time.Sleep(60 * time.Second) @@ -170,7 +229,8 @@ func installMCO() { By("Check clustermanagementaddon CR is created") Eventually(func() error { - _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + _, err := 
dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()). + Get(context.TODO(), "observability-controller", metav1.GetOptions{}) if err != nil { testFailed = true return err diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index a0c941e3d..8236783e5 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][Observability][Stable] Should be automatically created within 1 minute when delete manifestwork (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) @@ -40,20 +40,26 @@ var _ = Describe("", func() { } Eventually(func() error { - oldManifestWork, err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()).Namespace(clusterName).Get(context.TODO(), manifestWorkName, metav1.GetOptions{}) + oldManifestWork, err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()). + Namespace(clusterName). + Get(context.TODO(), manifestWorkName, metav1.GetOptions{}) oldManifestWorkResourceVersion = oldManifestWork.GetResourceVersion() return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) By("Waiting for manifestwork to be deleted") Eventually(func() error { - err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()).Namespace(clusterName).Delete(context.TODO(), manifestWorkName, metav1.DeleteOptions{}) + err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()). + Namespace(clusterName). + Delete(context.TODO(), manifestWorkName, metav1.DeleteOptions{}) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) By("Waiting for manifestwork to be created automatically") Eventually(func() error { - newManifestWork, err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()).Namespace(clusterName).Get(context.TODO(), manifestWorkName, metav1.GetOptions{}) + newManifestWork, err := clientDynamic.Resource(utils.NewOCMManifestworksGVR()). + Namespace(clusterName). 
+ Get(context.TODO(), manifestWorkName, metav1.GetOptions{}) if err == nil { if newManifestWork.GetResourceVersion() != oldManifestWorkResourceVersion { return nil @@ -67,7 +73,12 @@ var _ = Describe("", func() { It("[Stable] Waiting for metrics collector to be created automatically", func() { Eventually(func() error { - _, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + _, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) if podList != nil && len(podList.Items) > 0 { if oldCollectorPodName != podList.Items[0].Name { return nil @@ -89,7 +100,11 @@ var _ = Describe("", func() { It("[Stable] Checking metric to ensure that no data is lost in 1 minute", func() { Eventually(func() error { - err, _ = utils.ContainManagedClusterMetric(testOptions, `timestamp(node_memory_MemAvailable_bytes{cluster="`+clusterName+`}) - timestamp(node_memory_MemAvailable_bytes{cluster=`+clusterName+`"} offset 1m) > 59`, []string{`"__name__":"node_memory_MemAvailable_bytes"`}) + err, _ = utils.ContainManagedClusterMetric( + testOptions, + `timestamp(node_memory_MemAvailable_bytes{cluster="`+clusterName+`}) - timestamp(node_memory_MemAvailable_bytes{cluster=`+clusterName+`"} offset 1m) > 59`, + []string{`"__name__":"node_memory_MemAvailable_bytes"`}, + ) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*3).Should(Succeed()) }) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 427f9eca6..71a14e6d6 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -52,13 +52,23 @@ var _ = Describe("", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) - Expect(utils.Apply(testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext, yamlB)).NotTo(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) By("Waiting for new added metrics on grafana console") Eventually(func() error { for _, cluster := range clusters { query := fmt.Sprintf("node_memory_Active_bytes{cluster=\"%s\"} offset 1m", cluster) - err, _ := utils.ContainManagedClusterMetric(testOptions, query, []string{`"__name__":"node_memory_Active_bytes"`}) + err, _ := utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"node_memory_Active_bytes"`}, + ) if err != nil { return err } @@ -71,8 +81,11 @@ var _ = Describe("", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { - query := fmt.Sprintf("timestamp(instance:node_num_cpu:sum{cluster=\"%s\"}) - timestamp(instance:node_num_cpu:sum{cluster=\"%s\"} offset 1m) > 59", - cluster, cluster) + query := fmt.Sprintf( + "timestamp(instance:node_num_cpu:sum{cluster=\"%s\"}) - timestamp(instance:node_num_cpu:sum{cluster=\"%s\"} offset 1m) > 59", + cluster, + cluster, + ) metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) if metricslistError == nil { return nil @@ -86,8 +99,11 @@ var _ = Describe("", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { - 
query := fmt.Sprintf("timestamp(go_goroutines{cluster=\"%s\"}) - timestamp(go_goroutines{cluster=\"%s\"} offset 1m) > 59", - cluster, cluster) + query := fmt.Sprintf( + "timestamp(go_goroutines{cluster=\"%s\"}) - timestamp(go_goroutines{cluster=\"%s\"} offset 1m) > 59", + cluster, + cluster, + ) metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) if metricslistError == nil { return nil @@ -100,15 +116,20 @@ var _ = Describe("", func() { It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { - err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Delete(context.TODO(), allowlistCMname, metav1.DeleteOptions{}) + err := hubClient.CoreV1(). + ConfigMaps(MCO_NAMESPACE). + Delete(context.TODO(), allowlistCMname, metav1.DeleteOptions{}) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) By("Waiting for new added metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { - query := fmt.Sprintf("timestamp(node_memory_Active_bytes{cluster=\"%s\"}) - timestamp(node_memory_Active_bytes{cluster=\"%s\"} offset 1m) > 59", - cluster, cluster) + query := fmt.Sprintf( + "timestamp(node_memory_Active_bytes{cluster=\"%s\"}) - timestamp(node_memory_Active_bytes{cluster=\"%s\"} offset 1m) > 59", + cluster, + cluster, + ) metricslistError, _ = utils.ContainManagedClusterMetric(testOptions, query, []string{}) if metricslistError == nil { return nil @@ -121,6 +142,17 @@ var _ = Describe("", func() { It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration] (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() + _, etcdPodList := utils.GetPodList( + testOptions, + true, + "openshift-etcd", + "app=etcd", + ) + // ignore etcd network peer metrics for SNO cluster + if etcdPodList != nil && len(etcdPodList.Items) <= 0 { + ignoreMetricMap["etcd_network_peer_received_bytes_total"] = true + ignoreMetricMap["etcd_network_peer_sent_bytes_total"] = true + } for _, name := range metricList { _, ok := ignoreMetricMap[name] if !ok { diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 5b30662a7..91584940a 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -5,7 +5,6 @@ package tests import ( "context" - "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -29,25 +28,39 @@ var _ = Describe("", func() { Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability] (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { - oldResourceVersion := "" + oldCRResourceVersion := "" updateRetention := "10d" + oldCompactResourceVersion := "" Eventually(func() error { - cr, err := dynClient.Resource(utils.NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + cr, err := dynClient.Resource(utils.NewMCOMObservatoriumGVR()). + Namespace(MCO_NAMESPACE). 
+ Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { return err } + oldCRResourceVersion = cr.Object["metadata"].(map[string]interface{})["resourceVersion"].(string) + + sts, err := utils.GetStatefulSetWithLabel(testOptions, true, THANOS_COMPACT_LABEL, MCO_NAMESPACE) + if err != nil { + return err + } + oldCompactResourceVersion = (*sts).Items[0].ResourceVersion + cr.Object["spec"].(map[string]interface{})["thanos"].(map[string]interface{})["compact"].(map[string]interface{})["retentionResolution1h"] = updateRetention - oldResourceVersion = cr.Object["metadata"].(map[string]interface{})["resourceVersion"].(string) - _, err = dynClient.Resource(utils.NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Update(context.TODO(), cr, metav1.UpdateOptions{}) + _, err = dynClient.Resource(utils.NewMCOMObservatoriumGVR()). + Namespace(MCO_NAMESPACE). + Update(context.TODO(), cr, metav1.UpdateOptions{}) return err }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(Succeed()) Eventually(func() bool { - cr, err := dynClient.Resource(utils.NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + cr, err := dynClient.Resource(utils.NewMCOMObservatoriumGVR()). + Namespace(MCO_NAMESPACE). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err == nil { replicasNewRetention := cr.Object["spec"].(map[string]interface{})["thanos"].(map[string]interface{})["compact"].(map[string]interface{})["retentionResolution1h"] newResourceVersion := cr.Object["metadata"].(map[string]interface{})["resourceVersion"].(string) - if newResourceVersion != oldResourceVersion && + if newResourceVersion != oldCRResourceVersion && replicasNewRetention != updateRetention { return true } @@ -55,21 +68,35 @@ var _ = Describe("", func() { return false }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*1).Should(BeTrue()) - // wait for pod restarting - time.Sleep(10 * time.Second) + // ensure the thanos compact is restarted + Eventually(func() bool { + sts, err := utils.GetStatefulSetWithLabel(testOptions, true, THANOS_COMPACT_LABEL, MCO_NAMESPACE) + if err == nil { + if (*sts).Items[0].ResourceVersion != oldCompactResourceVersion { + argList := (*sts).Items[0].Spec.Template.Spec.Containers[0].Args + for _, arg := range argList { + if arg != "--retention.resolution-raw="+updateRetention { + return true + } + } + return false + } + } + return false + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(BeTrue()) By("Wait for thanos compact pods are ready") sts, err := utils.GetStatefulSetWithLabel(testOptions, true, THANOS_COMPACT_LABEL, MCO_NAMESPACE) Expect(err).NotTo(HaveOccurred()) Expect(len(sts.Items)).NotTo(Equal(0)) - // ensure the thanos rule pods are restarted successfully before processing + // ensure the thanos rule pod is ready Eventually(func() error { err = utils.CheckStatefulSetPodReady(testOptions, (*sts).Items[0].Name) if err != nil { return err } return nil - }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) }) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index bf6bd6af8..892d4905b 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -98,7 +98,8 @@ var _ = Describe("", func() { It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components 
[P2][Sev2][Observability][Stable] (reconcile/g0)", func() { By("Checking node selector spec in MCO CR") - mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) spec := mcoSC.Object["spec"].(map[string]interface{}) diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index b19b64891..59de8110e 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -36,7 +36,8 @@ var _ = Describe("", func() { testOptions.KubeConfig, testOptions.HubCluster.KubeContext) - mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) } @@ -48,7 +49,10 @@ var _ = Describe("", func() { case "deleteDelay": deleteDelay = reflect.ValueOf(v).String() idmk, _ := strconv.Atoi(deleteDelay[:len(deleteDelay)-1]) - ignoreDeletionMarksDelay = fmt.Sprintf("%.f", math.Ceil(float64(idmk)/float64(2))) + deleteDelay[len(deleteDelay)-1:] + ignoreDeletionMarksDelay = fmt.Sprintf( + "%.f", + math.Ceil(float64(idmk)/float64(2)), + ) + deleteDelay[len(deleteDelay)-1:] case "retentionInLocal": retentionInLocal = reflect.ValueOf(v).String() case "blockDuration": @@ -93,7 +97,10 @@ var _ = Describe("", func() { return nil } } - return fmt.Errorf("Failed to check store args: --ignore-deletion-marks-delay="+ignoreDeletionMarksDelay+". The args is: %v", argList) + return fmt.Errorf( + "Failed to check store args: --ignore-deletion-marks-delay="+ignoreDeletionMarksDelay+". The args is: %v", + argList, + ) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) @@ -112,7 +119,10 @@ var _ = Describe("", func() { return nil } } - return fmt.Errorf("Failed to check receive args: --tsdb.retention="+retentionInLocal+". The args is: %v", argList) + return fmt.Errorf( + "Failed to check receive args: --tsdb.retention="+retentionInLocal+". The args is: %v", + argList, + ) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) @@ -131,7 +141,10 @@ var _ = Describe("", func() { return nil } } - return fmt.Errorf("Failed to check rule args: --tsdb.retention="+retentionInLocal+". The args is: %v", argList) + return fmt.Errorf( + "Failed to check rule args: --tsdb.retention="+retentionInLocal+". The args is: %v", + argList, + ) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) @@ -150,7 +163,10 @@ var _ = Describe("", func() { return nil } } - return fmt.Errorf("Failed to check rule args: --tsdb.block-duration="+blockDuration+". The args is: %v", argList) + return fmt.Errorf( + "Failed to check rule args: --tsdb.block-duration="+blockDuration+". 
The args is: %v", + argList, + ) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 8e11271ca..e7ac27bac 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("@BVT - [P1][Sev1][Observability][Integration] Should access metrics via rbac-query-proxy route (route/g0)", func() { + It("@BVT - [P1][Sev1][observability][Integration] Should access metrics via rbac-query-proxy route (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query @@ -88,7 +88,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("@BVT - [P1][Sev1][Observability][Integration] Should access alert via alertmanager route (route/g0)", func() { + It("@BVT - [P1][Sev1][observability][Integration] Should access alert via alertmanager route (route/g0)", func() { Eventually(func() error { query := "/api/v2/alerts" url := "https://alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query diff --git a/tests/pkg/tests/observability_uninstall_test.go b/tests/pkg/tests/observability_uninstall_test.go index dcf4c4114..6eb7077f8 100644 --- a/tests/pkg/tests/observability_uninstall_test.go +++ b/tests/pkg/tests/observability_uninstall_test.go @@ -51,7 +51,9 @@ func uninstallMCO() { name := MCO_CR_NAME + "-addon" clientDynamic := utils.GetKubeClientDynamic(testOptions, false) // should check oba instance from managedcluster - instance, _ := clientDynamic.Resource(utils.NewMCOAddonGVR()).Namespace(MCO_ADDON_NAMESPACE).Get(context.TODO(), name, metav1.GetOptions{}) + instance, _ := clientDynamic.Resource(utils.NewMCOAddonGVR()). + Namespace(MCO_ADDON_NAMESPACE). + Get(context.TODO(), name, metav1.GetOptions{}) if instance != nil { utils.PrintManagedClusterOBAObject(testOptions) return fmt.Errorf("Failed to delete MCO addon instance") @@ -62,7 +64,9 @@ func uninstallMCO() { By("Waiting for delete manifestwork") Eventually(func() error { name := "endpoint-observability-work" - _, err := dynClient.Resource(utils.NewOCMManifestworksGVR()).Namespace("local-cluster").Get(context.TODO(), name, metav1.GetOptions{}) + _, err := dynClient.Resource(utils.NewOCMManifestworksGVR()). + Namespace("local-cluster"). 
+ Get(context.TODO(), name, metav1.GetOptions{}) return err }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(MatchError(`manifestworks.work.open-cluster-management.io "endpoint-observability-work" not found`)) diff --git a/tests/pkg/tests/results.xml b/tests/pkg/tests/results.xml deleted file mode 100644 index e797eee66..000000000 --- a/tests/pkg/tests/results.xml +++ /dev/null @@ -1,33 +0,0 @@ [deleted file: 33 lines of stale JUnit test results omitted] diff --git a/tests/pkg/tests/results.xml.addon b/tests/pkg/tests/results.xml.addon deleted file mode 100644 index a25fb7dcb..000000000 --- a/tests/pkg/tests/results.xml.addon +++ /dev/null @@ -1,152 +0,0 @@ [deleted file: 152 lines of stale JUnit test results from observability_addon_test.go omitted] diff --git a/tests/pkg/utils/mco_configmaps.go b/tests/pkg/utils/mco_configmaps.go index 917e572d2..be2102cc4 100644 --- a/tests/pkg/utils/mco_configmaps.go +++ b/tests/pkg/utils/mco_configmaps.go @@ -14,9 +14,13 @@ import ( func CreateConfigMap(opt TestOptions, isHub bool, cm *corev1.ConfigMap) error { clientKube := getKubeClient(opt, isHub) - found, err := clientKube.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Get(context.TODO(), cm.ObjectMeta.Name, metav1.GetOptions{}) + found, err := clientKube.CoreV1(). + ConfigMaps(cm.ObjectMeta.Namespace). + Get(context.TODO(), cm.ObjectMeta.Name, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { - _, err := clientKube.CoreV1().ConfigMaps(cm.ObjectMeta.Namespace).Create(context.TODO(), cm, metav1.CreateOptions{}) + _, err := clientKube.CoreV1(). + ConfigMaps(cm.ObjectMeta.Namespace).
+ Create(context.TODO(), cm, metav1.CreateOptions{}) if err == nil { klog.V(1).Infof("configmap %s created", cm.ObjectMeta.Name) } diff --git a/tests/pkg/utils/mco_dashboard.go b/tests/pkg/utils/mco_dashboard.go index 347278c67..84d22400e 100644 --- a/tests/pkg/utils/mco_dashboard.go +++ b/tests/pkg/utils/mco_dashboard.go @@ -30,7 +30,7 @@ func ContainDashboard(opt TestOptions, title string) (error, bool) { client := &http.Client{} if os.Getenv("IS_KIND_ENV") != "true" { tr := &http.Transport{ - /* #nosec */ + // #nosec TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index 8ceb768ea..4b66e3f27 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -168,7 +168,10 @@ func PrintAllMCOPodsStatus(opt TestOptions) { // only print not ready pod status if !isReady { - klog.V(1).Infof("Pod <%s> is not on <%s> status due to %#v\n", pod.Name, pod.Status.Phase, pod.Status) + klog.V(1).Infof("Pod <%s> is not on <%s> status due to %#v\n", + pod.Name, + pod.Status.Phase, + pod.Status) } } } @@ -192,7 +195,9 @@ func PrintMCOObject(opt TestOptions) { func PrintManagedClusterOBAObject(opt TestOptions) { clientDynamic := GetKubeClientDynamic(opt, false) - oba, getErr := clientDynamic.Resource(NewMCOAddonGVR()).Namespace(MCO_ADDON_NAMESPACE).Get(context.TODO(), "observability-addon", metav1.GetOptions{}) + oba, getErr := clientDynamic.Resource(NewMCOAddonGVR()). + Namespace(MCO_ADDON_NAMESPACE). + Get(context.TODO(), "observability-addon", metav1.GetOptions{}) if getErr != nil { klog.V(1).Infof("Failed to get oba object from managedcluster") return @@ -235,7 +240,10 @@ func PrintAllOBAPodsStatus(opt TestOptions) { // only print not ready pod status if !isReady { - klog.V(1).Infof("Pod <%s> is not on <%s> status due to %#v\n", pod.Name, pod.Status.Phase, pod.Status) + klog.V(1).Infof("Pod <%s> is not on <%s> status due to %#v\n", + pod.Name, + pod.Status.Phase, + pod.Status) } } } @@ -768,7 +776,9 @@ func CreatePullSecret(opt TestOptions, mcoNs string) error { Namespace: MCO_NAMESPACE, } klog.V(1).Infof("Create MCO pull secret") - _, err = clientKube.CoreV1().Secrets(pullSecret.Namespace).Create(context.TODO(), pullSecret, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + Secrets(pullSecret.Namespace). + Create(context.TODO(), pullSecret, metav1.CreateOptions{}) return err } @@ -850,7 +860,9 @@ func UninstallMCO(opt TestOptions) error { opt.HubCluster.KubeContext) klog.V(1).Infof("Delete MCO object storage secret") - deleteObjSecretErr := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), OBJ_SECRET_NAME, metav1.DeleteOptions{}) + deleteObjSecretErr := clientKube.CoreV1(). + Secrets(MCO_NAMESPACE). 
+ Delete(context.TODO(), OBJ_SECRET_NAME, metav1.DeleteOptions{}) if deleteObjSecretErr != nil { return deleteObjSecretErr } diff --git a/tests/pkg/utils/mco_deployments.go b/tests/pkg/utils/mco_deployments.go index 4a8e2e7e3..466d58529 100644 --- a/tests/pkg/utils/mco_deployments.go +++ b/tests/pkg/utils/mco_deployments.go @@ -26,7 +26,10 @@ func GetDeployment(opt TestOptions, isHub bool, name string, func GetDeploymentWithLabel(opt TestOptions, isHub bool, label string, namespace string) (*appv1.DeploymentList, error) { clientKube := getKubeClient(opt, isHub) - klog.V(1).Infof("Get get deployment with label selector <%v> in namespace <%v>, isHub: <%v>", label, namespace, isHub) + klog.V(1).Infof("Get get deployment with label selector <%v> in namespace <%v>, isHub: <%v>", + label, + namespace, + isHub) deps, err := clientKube.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: label, }) @@ -60,7 +63,11 @@ func UpdateDeployment( return updateDep, err } -func UpdateDeploymentReplicas(opt TestOptions, deployName, crProperty string, desiredReplicas, expectedReplicas int32) error { +func UpdateDeploymentReplicas( + opt TestOptions, + deployName, crProperty string, + desiredReplicas, expectedReplicas int32, +) error { clientDynamic := GetKubeClientDynamic(opt, true) deploy, err := GetDeployment(opt, true, deployName, MCO_NAMESPACE) if err != nil { @@ -72,7 +79,9 @@ func UpdateDeploymentReplicas(opt TestOptions, deployName, crProperty string, de return err } - obs, err := clientDynamic.Resource(NewMCOMObservatoriumGVR()).Namespace(MCO_NAMESPACE).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + obs, err := clientDynamic.Resource(NewMCOMObservatoriumGVR()). + Namespace(MCO_NAMESPACE). + Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { return err } diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go index c042ae11f..e1ef022b7 100644 --- a/tests/pkg/utils/mco_managedcluster.go +++ b/tests/pkg/utils/mco_managedcluster.go @@ -15,7 +15,8 @@ func UpdateObservabilityFromManagedCluster(opt TestOptions, enableObservability clusterName := GetManagedClusterName(opt) if clusterName != "" { clientDynamic := GetKubeClientDynamic(opt, true) - cluster, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).Get(context.TODO(), clusterName, metav1.GetOptions{}) + cluster, err := clientDynamic.Resource(NewOCMManagedClustersGVR()). + Get(context.TODO(), clusterName, metav1.GetOptions{}) if err != nil { return err } @@ -30,7 +31,8 @@ func UpdateObservabilityFromManagedCluster(opt TestOptions, enableObservability } else { delete(labels, "observability") } - _, updateErr := clientDynamic.Resource(NewOCMManagedClustersGVR()).Update(context.TODO(), cluster, metav1.UpdateOptions{}) + _, updateErr := clientDynamic.Resource(NewOCMManagedClustersGVR()). 
+ Update(context.TODO(), cluster, metav1.UpdateOptions{}) if updateErr != nil { return updateErr } @@ -134,7 +136,7 @@ func ListKSManagedClusterNames(opt TestOptions) ([]string, error) { if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { obsControllerStr = obsController.(string) } - if vendorStr != "OpenShift" && obsControllerStr != "unreachable" { + if vendorStr != "OpenShift" && obsControllerStr == "available" { clusterNameStr := "" if clusterNameVal, ok := labels["name"]; ok { clusterNameStr = clusterNameVal.(string) diff --git a/tests/pkg/utils/mco_oba.go b/tests/pkg/utils/mco_oba.go index 7dafdcb52..3a0dfed34 100644 --- a/tests/pkg/utils/mco_oba.go +++ b/tests/pkg/utils/mco_oba.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" ) @@ -20,7 +21,9 @@ func CheckOBAStatus(opt TestOptions, namespace, status string) error { opt.KubeConfig, opt.HubCluster.KubeContext) - oba, err := dynClient.Resource(NewMCOAddonGVR()).Namespace(namespace).Get(context.TODO(), "observability-addon", metav1.GetOptions{}) + oba, err := dynClient.Resource(NewMCOAddonGVR()). + Namespace(namespace). + Get(context.TODO(), "observability-addon", metav1.GetOptions{}) if err != nil { return err } @@ -31,13 +34,28 @@ func CheckOBAStatus(opt TestOptions, namespace, status string) error { } } +func CheckOBADeleted(opt TestOptions, namespace string) error { + dynClient := NewKubeClientDynamic( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + _, err := dynClient.Resource(NewMCOAddonGVR()).Namespace(namespace).Get(context.TODO(), "observability-addon", metav1.GetOptions{}) + if err == nil || !errors.IsNotFound(err) { + return fmt.Errorf("observability-addon is not properly deleted for managed cluster %s", namespace) + } + return nil +} + func CheckManagedClusterAddonsStatus(opt TestOptions, namespace, status string) error { dynClient := NewKubeClientDynamic( opt.HubCluster.ClusterServerURL, opt.KubeConfig, opt.HubCluster.KubeContext) - mca, err := dynClient.Resource(NewMCOManagedClusterAddonsGVR()).Namespace(namespace).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + mca, err := dynClient.Resource(NewMCOManagedClusterAddonsGVR()). + Namespace(namespace). 
+ Get(context.TODO(), "observability-controller", metav1.GetOptions{}) if err != nil { return err } @@ -88,3 +106,17 @@ func CheckAllOBADisabled(opt TestOptions) error { } return nil } + +func CheckAllOBAsDeleted(opt TestOptions) error { + clusters, err := ListManagedClusters(opt) + if err != nil { + return err + } + for _, cluster := range clusters { + err = CheckOBADeleted(opt, cluster) + if err != nil { + return err + } + } + return nil +} diff --git a/tests/pkg/utils/mco_pods.go b/tests/pkg/utils/mco_pods.go index ee3465c27..63ce6173a 100644 --- a/tests/pkg/utils/mco_pods.go +++ b/tests/pkg/utils/mco_pods.go @@ -21,7 +21,12 @@ func GetPodList(opt TestOptions, isHub bool, namespace string, labelSelector str } podList, err := clientKube.CoreV1().Pods(namespace).List(context.TODO(), listOption) if err != nil { - klog.Errorf("Failed to get pod list in namespace %s using labelselector %s due to %v", namespace, labelSelector, err) + klog.Errorf( + "Failed to get pod list in namespace %s using labelselector %s due to %v", + namespace, + labelSelector, + err, + ) return err, podList } if podList != nil && len(podList.Items) == 0 { @@ -40,7 +45,13 @@ func DeletePod(opt TestOptions, isHub bool, namespace, name string) error { return nil } -func GetPodLogs(opt TestOptions, isHub bool, namespace, podName, containerName string, previous bool, tailLines int64) (string, error) { +func GetPodLogs( + opt TestOptions, + isHub bool, + namespace, podName, containerName string, + previous bool, + tailLines int64, +) (string, error) { clientKube := getKubeClient(opt, isHub) podLogOpts := v1.PodLogOptions{ Container: containerName, diff --git a/tests/pkg/utils/mco_router_ca.go b/tests/pkg/utils/mco_router_ca.go index 9c682cad5..3d0101dd9 100644 --- a/tests/pkg/utils/mco_router_ca.go +++ b/tests/pkg/utils/mco_router_ca.go @@ -13,15 +13,29 @@ import ( ) const ( - RouterCertsSecretName = "custom-ca-secret" + RouterCertsSecretName = "router-certs-default" + RouterCustomCertsSecretName = "custom-ca-secret" ) func GetRouterCA(cli kubernetes.Interface) ([]byte, error) { var caCrt []byte - caSecret, err := cli.CoreV1().Secrets("openshift-ingress").Get(context.TODO(), RouterCertsSecretName, metav1.GetOptions{}) + caSecret, err := cli.CoreV1(). + Secrets("openshift-ingress"). + Get(context.TODO(), RouterCertsSecretName, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed to get router certificate secret %s due to %v", RouterCertsSecretName, err) - return caCrt, err + caSecret1, err := cli.CoreV1(). + Secrets("openshift-ingress"). 
+ Get(context.TODO(), RouterCustomCertsSecretName, metav1.GetOptions{}) + if err != nil { + klog.Errorf("Failed to get router certificate secret %s due to %v", RouterCustomCertsSecretName, err) + return caCrt, err + } + caCrt, ok := caSecret1.Data["tls.crt"] + if ok { + return caCrt, nil + } + return caCrt, fmt.Errorf("failed to get tls.crt from %s secret", RouterCustomCertsSecretName) } caCrt, ok := caSecret.Data["tls.crt"] if ok { diff --git a/tests/pkg/utils/mco_sa.go b/tests/pkg/utils/mco_sa.go index a23fac71e..fac52ef85 100644 --- a/tests/pkg/utils/mco_sa.go +++ b/tests/pkg/utils/mco_sa.go @@ -47,3 +47,20 @@ func CreateSA(opt TestOptions, isHub bool, namespace string, } return nil } + +func GetSAWithLabel(opt TestOptions, isHub bool, label string, + namespace string) (*v1.ServiceAccountList, error) { + clientKube := getKubeClient(opt, isHub) + klog.V(1).Infof("Get get sa with label selector <%v> in namespace <%v>, isHub: <%v>", + label, + namespace, + isHub) + sas, err := clientKube.CoreV1().ServiceAccounts(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: label, + }) + if err != nil { + klog.Errorf("Failed to get ServiceAccount with label selector %s in namespace %s due to %v", label, namespace, err) + } + + return sas, err +} diff --git a/tests/pkg/utils/mco_statefulset.go b/tests/pkg/utils/mco_statefulset.go index fae1b2b90..e12ae78dd 100644 --- a/tests/pkg/utils/mco_statefulset.go +++ b/tests/pkg/utils/mco_statefulset.go @@ -29,7 +29,11 @@ func GetStatefulSetWithLabel(opt TestOptions, isHub bool, label string, }) if err != nil { - klog.Errorf("Failed to get statefulset with label selector %s in namespace %s due to %v", label, namespace, err) + klog.Errorf( + "Failed to get statefulset with label selector %s in namespace %s due to %v", + label, + namespace, + err) } return sts, err } diff --git a/tests/pkg/utils/utils.go b/tests/pkg/utils/utils.go index 46f53b1d1..07f265ce5 100644 --- a/tests/pkg/utils/utils.go +++ b/tests/pkg/utils/utils.go @@ -21,6 +21,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/version" "k8s.io/klog" @@ -181,7 +182,9 @@ func FetchBearerToken(opt TestOptions) (string, error) { } clientKube := NewKubeClient(opt.HubCluster.ClusterServerURL, opt.KubeConfig, opt.HubCluster.KubeContext) - secretList, err := clientKube.CoreV1().Secrets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{FieldSelector: "type=kubernetes.io/service-account-token"}) + secretList, err := clientKube.CoreV1(). + Secrets(MCO_NAMESPACE). + List(context.TODO(), metav1.ListOptions{FieldSelector: "type=kubernetes.io/service-account-token"}) if err != nil { return "", err } @@ -225,7 +228,9 @@ func LoadConfig(url, kubeconfig, ctx string) (*rest.Config, error) { } // If no in-cluster config, try the default location in the user's home directory. 
if usr, err := user.Current(); err == nil { - klog.V(5).Infof("clientcmd.BuildConfigFromFlags for url %s using %s\n", url, filepath.Join(usr.HomeDir, ".kube", "config")) + klog.V(5).Infof("clientcmd.BuildConfigFromFlags for url %s using %s\n", + url, + filepath.Join(usr.HomeDir, ".kube", "config")) if c, err := clientcmd.BuildConfigFromFlags(url, filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { return c, nil } @@ -283,9 +288,13 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientAPIExtension.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientAPIExtension.ApiextensionsV1(). + CustomResourceDefinitions(). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientAPIExtension.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientAPIExtension.ApiextensionsV1(). + CustomResourceDefinitions(). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { existingObject.Spec = obj.Spec klog.Warningf("CRD %s already exists, updating!", existingObject.Name) @@ -298,7 +307,9 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().Namespaces().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + Namespaces(). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { _, err = clientKube.CoreV1().Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{}) } else { @@ -313,9 +324,13 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().ServiceAccounts(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + ServiceAccounts(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.CoreV1().ServiceAccounts(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + ServiceAccounts(obj.Namespace). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) @@ -328,7 +343,9 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.RbacV1().ClusterRoleBindings().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.RbacV1(). + ClusterRoleBindings(). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { _, err = clientKube.RbacV1().ClusterRoleBindings().Create(context.TODO(), obj, metav1.CreateOptions{}) } else { @@ -343,7 +360,9 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().Secrets(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + Secrets(obj.Namespace). 
+ Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { _, err = clientKube.CoreV1().Secrets(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) } else { @@ -358,9 +377,13 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().ConfigMaps(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + ConfigMaps(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.CoreV1().ConfigMaps(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + ConfigMaps(obj.Namespace). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) @@ -373,11 +396,16 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().Services(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + Services(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.CoreV1().Services(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + Services(obj.Namespace). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta + obj.Spec.ClusterIP = existingObject.Spec.ClusterIP klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) _, err = clientKube.CoreV1().Services(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } @@ -388,11 +416,16 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + PersistentVolumeClaims(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + PersistentVolumeClaims(obj.Namespace). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta + obj.Spec.VolumeName = existingObject.Spec.VolumeName klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) _, err = clientKube.CoreV1().PersistentVolumeClaims(obj.Namespace).Update(context.TODO(), obj, metav1.UpdateOptions{}) } @@ -403,9 +436,13 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.AppsV1().Deployments(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.AppsV1(). + Deployments(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.AppsV1().Deployments(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.AppsV1(). + Deployments(obj.Namespace). 
+ Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) @@ -418,9 +455,13 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().LimitRanges(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + LimitRanges(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.CoreV1().LimitRanges(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + LimitRanges(obj.Namespace). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) @@ -433,9 +474,13 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.CoreV1().ResourceQuotas(obj.Namespace).Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.CoreV1(). + ResourceQuotas(obj.Namespace). + Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { - _, err = clientKube.CoreV1().ResourceQuotas(obj.Namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientKube.CoreV1(). + ResourceQuotas(obj.Namespace). + Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.ObjectMeta = existingObject.ObjectMeta klog.Warningf("%s %s/%s already exists, updating!", obj.Kind, obj.Namespace, obj.Name) @@ -448,7 +493,9 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { if err != nil { return err } - existingObject, errGet := clientKube.StorageV1().StorageClasses().Get(context.TODO(), obj.Name, metav1.GetOptions{}) + existingObject, errGet := clientKube.StorageV1(). + StorageClasses(). 
+ Get(context.TODO(), obj.Name, metav1.GetOptions{}) if errGet != nil { _, err = clientKube.StorageV1().StorageClasses().Create(context.TODO(), obj, metav1.CreateOptions{}) } else { @@ -457,34 +504,47 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { _, err = clientKube.StorageV1().StorageClasses().Update(context.TODO(), obj, metav1.UpdateOptions{}) } default: + var gvr schema.GroupVersionResource switch kind { case "MultiClusterObservability": + gvr = NewMCOGVRV1BETA2() + if apiVersion == "observability.open-cluster-management.io/v1beta1" { + gvr = NewMCOGVRV1BETA1() + } klog.V(5).Infof("Install MultiClusterObservability: %s\n", f) + case "PrometheusRule": + gvr = schema.GroupVersionResource{ + Group: "monitoring.coreos.com", + Version: "v1", + Resource: "prometheusrules"} + klog.V(5).Infof("Install PrometheusRule: %s\n", f) default: return fmt.Errorf("resource %s not supported", kind) } - gvr := NewMCOGVRV1BETA2() - if apiVersion == "observability.open-cluster-management.io/v1beta1" { - gvr = NewMCOGVRV1BETA1() + if kind == "MultiClusterObservability" { + // url string, kubeconfig string, ctx string + opt := TestOptions{ + HubCluster: Cluster{ + ClusterServerURL: url, + KubeContext: ctx, + }, + KubeConfig: kubeconfig, + } + if ips, err := GetPullSecret(opt); err == nil { + obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips + } } - // url string, kubeconfig string, ctx string - opt := TestOptions{ - HubCluster: Cluster{ - ClusterServerURL: url, - KubeContext: ctx, - }, - KubeConfig: kubeconfig, - } clientDynamic := NewKubeClientDynamic(url, kubeconfig, ctx) if ns := obj.GetNamespace(); ns != "" { - existingObject, errGet := clientDynamic.Resource(gvr).Namespace(ns).Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) + existingObject, errGet := clientDynamic.Resource(gvr). + Namespace(ns). + Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) if errGet != nil { - if ips, err := GetPullSecret(opt); err == nil { - obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips - } - _, err = clientDynamic.Resource(gvr).Namespace(ns).Create(context.TODO(), obj, metav1.CreateOptions{}) + _, err = clientDynamic.Resource(gvr). + Namespace(ns). 
+ Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.Object["metadata"] = existingObject.Object["metadata"] klog.Warningf("%s %s/%s already exists, updating!", obj.GetKind(), obj.GetNamespace(), obj.GetName()) @@ -493,9 +553,6 @@ func Apply(url string, kubeconfig string, ctx string, yamlB []byte) error { } else { existingObject, errGet := clientDynamic.Resource(gvr).Get(context.TODO(), obj.GetName(), metav1.GetOptions{}) if errGet != nil { - if ips, err := GetPullSecret(opt); err == nil { - obj.Object["spec"].(map[string]interface{})["imagePullSecret"] = ips - } _, err = clientDynamic.Resource(gvr).Create(context.TODO(), obj, metav1.CreateOptions{}) } else { obj.Object["metadata"] = existingObject.Object["metadata"] @@ -586,7 +643,12 @@ func HaveCRDs(c Cluster, kubeconfig string, expectedCRDs []string) error { return nil } -func HaveDeploymentsInNamespace(c Cluster, kubeconfig string, namespace string, expectedDeploymentNames []string) error { +func HaveDeploymentsInNamespace( + c Cluster, + kubeconfig string, + namespace string, + expectedDeploymentNames []string, +) error { client := NewKubeClient(c.ClusterServerURL, kubeconfig, c.KubeContext) versionInfo, err := client.Discovery().ServerVersion() @@ -692,7 +754,9 @@ func GetPullSecret(opt TestOptions) (string, error) { mchName := mchList.Items[0].GetName() mchNs := mchList.Items[0].GetNamespace() - getMCH, err := clientDynamic.Resource(NewOCMMultiClusterHubGVR()).Namespace(mchNs).Get(context.TODO(), mchName, metav1.GetOptions{}) + getMCH, err := clientDynamic.Resource(NewOCMMultiClusterHubGVR()). + Namespace(mchNs). + Get(context.TODO(), mchName, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/tests/run-in-kind/req_crds/prometheusrule-crd.yaml b/tests/run-in-kind/req_crds/prometheusrule-crd.yaml new file mode 100644 index 000000000..1dd024388 --- /dev/null +++ b/tests/run-in-kind/req_crds/prometheusrule-crd.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.6.2 + creationTimestamp: null + name: prometheusrules.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PrometheusRule + listKind: PrometheusRuleList + plural: prometheusrules + singular: prometheusrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PrometheusRule defines recording and alerting rules for a Prometheus + instance + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired alerting rule definitions for Prometheus. + properties: + groups: + description: Content of Prometheus rule file + items: + description: 'RuleGroup is a list of sequentially evaluated recording + and alerting rules. 
Note: PartialResponseStrategy is only used + by ThanosRuler and will be ignored by Prometheus instances. Valid + values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/main/docs/components/rule.md#partial-response' + properties: + interval: + type: string + name: + type: string + partial_response_strategy: + type: string + rules: + items: + description: 'Rule describes an alerting or recording rule + See Prometheus documentation: [alerting](https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) + or [recording](https://www.prometheus.io/docs/prometheus/latest/configuration/recording_rules/#recording-rules) + rule' + properties: + alert: + type: string + annotations: + additionalProperties: + type: string + type: object + expr: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + for: + type: string + labels: + additionalProperties: + type: string + type: object + record: + type: string + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/tools/generate-dashboard-configmap-yaml.sh b/tools/generate-dashboard-configmap-yaml.sh index 972b0931b..363898d39 100755 --- a/tools/generate-dashboard-configmap-yaml.sh +++ b/tools/generate-dashboard-configmap-yaml.sh @@ -45,7 +45,7 @@ start() { savePath=$2 fi org_dashboard_name=$1 - dashboard_name=`echo ${1// /-} | tr '[:upper:]' '[:lower:]'` + dashboard_name=`echo ${1//[!(a-z\A-Z\0-9\-\.)]/-} | tr '[:upper:]' '[:lower:]'` while [[ $# -gt 0 ]] do diff --git a/tools/grafana-dashboards-for-ocp3.11.md b/tools/grafana-dashboards-for-ocp3.11.md new file mode 100644 index 000000000..6c707e385 --- /dev/null +++ b/tools/grafana-dashboards-for-ocp3.11.md @@ -0,0 +1,58 @@ +# Import Grafana Dashboards for OCP 3.11 clusters + +_Note:_ The Grafana dashboards for OCP 3.11 clusters are provided out of the box in ACM 2.5 and above. If you're running ACM < 2.5, follow this guide to import the Grafana dashboards for OCP 3.11 clusters manually. + +## Prerequisites + +You must meet the following requirements to import the dashboards for OCP 3.11 clusters: + +1. `oc` (ver. 4.3+) & `kubectl` (ver. 1.16+) configured to connect to your ACM hub cluster +2. [jq](https://stedolan.github.io/jq/) command-line JSON processor >= 1.6 +3. [gojsontoyaml](https://github.com/brancz/gojsontoyaml) command-line tool >=v0.1.0 +4. [sed](https://www.gnu.org/software/sed/) command-line tool. + +_Note:_ If you're running the steps in this document on macOS, it is recommended to use GNU sed, installed via `brew install gnu-sed`. + +## Getting started + +1. Log in to the ACM hub cluster via the `oc` command line. + +2. Clone the `multicluster-observability-operator` repository and change into its directory: + +```bash +git clone git@github.com:stolostron/multicluster-observability-operator.git +cd multicluster-observability-operator +``` + +3.
+3. Create a configmap that contains the custom metrics allow list for OCP 3.11 clusters with the following commands:
+
+```bash
+curl -L https://raw.githubusercontent.com/open-cluster-management/multicluster-observability-operator/main/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml | gojsontoyaml --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' > /tmp/ocp311_metrics_list.yaml
+oc -n open-cluster-management-observability create configmap observability-metrics-custom-allowlist --from-file=metrics_list.yaml=/tmp/ocp311_metrics_list.yaml
+```
+
+4. Load the OCP 3.11 dashboards into your ACM hub cluster.
+
+- For ACM <= 2.3, run the following commands:
+
+```bash
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec sed -i 's/clusterType=\\"ocp3\\",//g' {} \;
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec sed -i 's/clusterType=\\"ocp3\\"//g' {} \;
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec sed -i '/namespace:/a\ \ labels:' {} \;
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec sed -i '/labels:/a\ \ \ \ grafana-custom-dashboard: "true"' {} \;
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec oc apply -n open-cluster-management-observability -f {} \;
+```
+
+- For ACM 2.4, run the following commands:
+
+```bash
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec sed -i '/namespace:/a\ \ labels:' {} \;
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec sed -i '/labels:/a\ \ \ \ grafana-custom-dashboard: "true"' {} \;
+find ./operators/multiclusterobservability/manifests/base/grafana -name "*-ocp311.yaml" -exec oc apply -n open-cluster-management-observability -f {} \;
+```
+
+5. Open the Grafana console and switch to the dashboards page; you should see the dashboards for OCP 3.11 clusters under the `OCP 3.11` folder:
+
+_Note:_ For ACM 2.4, the cluster overview dashboard for OCP 3.11 clusters is located in the `General` folder for legacy reasons.
+
+![ocp311-dashboards-example.png](ocp311-dashboards-example.png)
diff --git a/tools/grafana-dev-config.ini b/tools/grafana-dev-config.ini
new file mode 100644
index 000000000..e69de29bb
diff --git a/tools/grafana-dev-config.ini-e b/tools/grafana-dev-config.ini-e
new file mode 100644
index 000000000..e69de29bb
diff --git a/tools/grafana-dev-deploy.yaml b/tools/grafana-dev-deploy.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/tools/ocp311-dashboards-example.png b/tools/ocp311-dashboards-example.png
new file mode 100644
index 0000000000000000000000000000000000000000..9e4a4134c3709486cd19040c9b13431368005a8e
GIT binary patch
literal 187880
(binary image data for tools/ocp311-dashboards-example.png omitted)
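For reference, here is a minimal sketch of what one of the `*-ocp311.yaml` dashboard ConfigMaps looks like after the `find`/`sed` commands in `tools/grafana-dashboards-for-ocp3.11.md` above have run. The ConfigMap name and data key are placeholders; only the target namespace and the `grafana-custom-dashboard: "true"` label come from the commands themselves:

```yaml
# Hypothetical shape of a labeled OCP 3.11 dashboard ConfigMap.
# The name and the data key are placeholders; the namespace and the
# grafana-custom-dashboard label are what the sed/oc commands above produce.
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-dashboard-ocp311-example   # placeholder name
  namespace: open-cluster-management-observability
  labels:
    grafana-custom-dashboard: "true"
data:
  example-ocp311-dashboard.json: |-
    { "title": "Example OCP 3.11 dashboard", "panels": [] }
```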
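Similarly, a minimal PrometheusRule that would be accepted by the `prometheusrules.monitoring.coreos.com` CRD added earlier in this patch (`tests/run-in-kind/req_crds/prometheusrule-crd.yaml`); the group name, alert name, expression, and namespace are illustrative only:

```yaml
# Illustrative only: a minimal resource satisfying the required fields of the
# CRD schema above (groups[].name, groups[].rules, and rules[].expr).
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: example-rules                # placeholder name
  namespace: open-cluster-management-observability
spec:
  groups:
  - name: example.rules              # required: name
    rules:                           # required: rules
    - alert: ExampleAlwaysFiring
      expr: vector(1)                # required: expr (integer or string)
      for: 5m
      labels:
        severity: none
      annotations:
        summary: Placeholder alert used to illustrate the schema.
```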
z8gwro)D}=y>4v7A0~%vL*3OcxkZH$8a*a;uJdB7FhO&AB+t-<(MaMC4`B1Czol4Qz z)J#7SK;e;RFW)z5utPmIJmr@)pP?*-VDOtGe*UBOig)V$ZTf3NP(g1H?} zx!VZh=-#&(;|8)lhe2XHjnobGU$V}uYW&Dn)i%>wnwb61jMpaIzl<}dTM7(Cu4g)) zq&(@IE&m+CXow`cr)PHf{*+zHynw0KtbM~Scajsld>O-W0IhW$SLzHL1B*s(`Holl z43V0zLpHv!<{aI6F%J5y)u0_;VV;Z#e6}`r{qCRf&^wbm@Ss)7$360N>(buOr^$n5 z!wK~W9*xXQk4Q0DPgG8cNyQ^??nkYHhn?dhQ^J4elw1}+h|l+LQjk)jNXr%KRy%*< za(bmazXW6Z$R+lV@b(dtE;Z%Z>QkaZ5O$4)$zFL=KApf7qG)Ey`hVD#3p6*5DPPhx87%I31uzB?k%$GKAMf9)W%5LxTLP;=38-e$WdkFO3Hh z_A;*RkMEa0YEf&}Jt_MzSCl8q=;wQ}r-kv4BA58!w=sCqv-z0MuQNnpcK?5%drus0 z_x$?!lyj?aoxAKWL)qlY^r*%wAjuLOi#;#(OUI&u2)onn3)P>66J3Gno!{@in}G(p z_7mbWcq_9fOWawfBUh`9ppzrU`vo;hL$K#XC7=RA_g-?$Z3-9`(!|#%?M2XCfA&JT ze%9aW-rAnwlznTUZi#NzyZk(3 zsv~=%H~ACQx5Wj`rjqV>59DmH0G_+8BD3P9o<5CZU2+jDbhd+WUX+*+DsKIG(+5Ae zi#1<*@axLI`r8=la3-Wz`)Ry+wMX_qiQ6O$EeAjKK)q`_^VXFu%AfA0@n%pg&2HYz zfL`M^PqMapt{rdXffY8L4}M9A7sogLItiF|>f({n_JN*m!8!vc@ckutf3b=g6xfR7 zW%mc~TC1_4^Fw{VXf<-%OjP?KPltlyYUH;U-)ai+_2dkKM+M6YsY{3?5y#b--CRJN zFVwHi!mh^*2*4GG;o9F95jG8$i-;Fe)jE~<5-m#2v&TtZoYxGvVcD?pH6(@lEr@Ab_=^n=6dLfVwTZ&XxHUl>g2ODj8gYh z^-^>u5be3OBum3^y`M|Zjxs_^lv?acTm>dn?k%a`{X(HXuPClc(=c#eWv)DilDneJ zbf?LWEM=a1WtP27^<0f?uQ>J%6)n?5D?G1YB8YetQ5%WYOf|G_jW;w?#*TK(;QSb; z;S`f{GuMc7N|ISTv$S?S^J#KBXuEhi+syTxLJ~jNz=w;Ljtr5Wi1*N>hTE!UoAiQC z4?6OktNQ*(9j~zVDwm(VPgFWjnKT&<4MJ{O31yC?Jk3U?t7noZt+Lq}hY>+y! zE+0&xT(^LGl;KTv? znRX^Es{E}Ez&ccJz|_Z41)p^@{kTshU$jRYubfBsDL}MeuNFk>mY-FQph*^t(GRtu@D{pJPtfuk-LBw7I#xy9*r zFVwH)>mo$KH%wyx!u~6S|;+MSF6EYkr6o z$#>&L*pi{SEoU*xBS1Js4# zx8prO5OZ|;m4A(5mc`$bRXac(SagZ$H0NiGKPs(3GEUbRLZ1k5kQo#Pccx3`r)_3% z!6qzzVG{SFRxU&KYlMN3=8vL>*da0TBH_b)r(P?Lbic5`YOrIVD9UX)#k^SHA!=4V z$inHOFswuB;gko!?IYP(0|SpCPVVG;kSdh|l#pLX?Sn6fvCl#)%iT~@c~@G$a1w)O za~Ef?uF|07z~|+OX9iUo$j^Yb@)oYj-Y-JRH3o)I8kH1kABB*yC%V1C>&-HZ?ShO; z9ht6s?LG46Q=~(SzCL3ijwXF=!E034-rU#ze)kTwky*qXAl>CSdvK_ZSxSgGR#$&N=RDx7#F8T5TNA4!got{>O+{n zgf2G!qoydhFw-eXowxATt*ZF=OK0OO=f|jKKYX(fxZ*22FG&7~s-czGfkrirP+6FQ zQyubeCc4$g%bwcRrd8-qP!h(G`pvOd0j!C~;~W4%#S}ubmZ(fEh&7rI7^2Mtn?O>! 
zOy$h%dKX?tzBmMQyqA#zv$<{?N72eFL8~LQ2~-bJn@oK`RR2R$QhPt;xPl>M`7w_hBzZ&e2GEoG?&d#q?y0^muviBRI(?=3#No_^G zp6v&LO4C+KZnrhjDf6l|2PA%!Md*~|-tssuix%8%au#o<_VOshY znWL=BCT|{A1^W!CQq>d4D6Go)-u z+>J*=$k%ErDqudY{bRrcT~TBw~l|8e>)@>9Sj=8I*^X~f!vGx@djY+cR{b5vK< zl~48Kay!)^vju0j>u!W)ZgqyX56|~PN1hH~L(p>;b3sVN@@RPBi z!d61iJ(wDTCj>t3+?a7TPGUOe8Wb7$H^=?MWvXTyeJc}5U>hb^n5!!NtQxISp^cnL z9VFQxMeRFs*qDEi?Y|t6!^zMGjsEkxSK2FUFTCGT8jd&99Z2FnZav2Ghtw16`?tf+ zoE57(*$>9A>2OSX)pfo!$9#bq$R zJa+Ff@FTrULVkJ@f>6RnQj*@-gS>>Mmp*h`NkJX>P9>GF58V*}5;qslT&m7I|5UJ> zI=g85@KIV!X8FSNSdEq0s-d5mA~^Y35-HjkN2>OVHr8zbm*g4lH%KT=`(0}n?rG$V zf~-%t-#D+dP0H6uE!}c4sT_2D0Z4l4CVbbEr0VMpEj44?hNr36zeUT=C=lulJr@0R zRB}KV0G8+~PG@4_6!89VPvE=XrVl?&e7jVrQCIH!hzT3_m`96+68qi9JH6c1V4%CK zfkh$rQMr3%yJGR$!{2NCRw`)?K|el*=pM(R_m1^*SK9VOt_=Hji?nZ->kD516MXg` zhSY}`iNd4Nt20*?=~v$_(TTK71h^22HcFhRO}f@P$lRhl;YS}c{kJ=5o1-JCD}VDI zT=a(`IA%SXXCugwT5Y)KQ%UQH72`d?5pwD>h^B)u@d^TSg5pZb_?=xYzgQ|h!JGL* zznNIRmt4+o%?Cx*nNvRq|8Q^8b3*6L+?nJZ#i?{|Zuz~q>sv7163{BF0T#TqkHpIs zEPs>vin=IL`^}||qao@k{_WNfIKtaN^bdBksQxexm``2NDQU10Tp9DrEBp$_^XIQK zXA6wri0#8u@1qPO{I5GXAw$FOf}#DA97~xVVPt~7(^Pl&+$o?7k8V?W=#~;2R2=q{ zBtHChl}V*jjRn-JDN9-}drVP$WhjWh6+TbiZ|7@*ziKQ~&fkgw^w*bl6K_Op0NinR z(sK5|pT&9T6Nev(E?6(bLCXLUFg0V-Zs(Kwy5S~qmOUBNl@NULHn-5I z{NO-g;T?dUu*U<;v%i9}p5D8Tpr4i|B$GM&)Gb8hn~?Ju#yi)w59QQ2K?+Lvba-HUzDp7Pa9Wy73-8OX5W_$QD`-1zgR6t#5-(NpH<##M%^~-1{9hy1}35``fNd zu*rEZ*aFTbtlA)B(yAQ?v`D|D9KW(9=N%l;W=9gB49uID6doB>g{;rMaQQiUejsd<1)nR+}|KftsH9CTRor9K4M=b|9UTnGe`9pYy) zbyH6#{;!=w{sqv9;90>zDZ)dqX1SNvCwjZWmUM zm~Gzy)V@6wa?85OTo`dNJIxseolk1{ZX^1F7B{1KH{$pJybs+It|nE_CPQWJF=P2p0JeXob*!tfxjcxzz?5U1rTTqQg6JxV!-l&}=ju zgY5-kmnpVC%0OqjN=x^Hv<0z?1-QQglCID?O4#&TPW`#rFP^#_ji@p*L_b^POUz5t zl;G~5g1FN^)Y76y69#Bk>{7xC`+|3yIPlSj;r6# z5MlSomg~&HZzC92_slPl0R_T<&@U2To5Hnm^@S!#cuwTsM!-L zn(GXo2OOLP;sbJy>4-A^PL_Yi{z9h$X`bgFJq8gbh0}fUVH%9mIzVqZ<6CKQmN(yC z1_q+POGu1L8!Kfg?GK6Kj}uEG4!QmOb-c|JRpV9|@ALT-$67oNX+NBacp#cf zG#gdlLr@Oo$mw%;JoVHzc+eNsrQdpIWpX@DV~M{YiTRjfW1O;bFhRY-%vJGXL4A=T=db z_I>5+#SK|FA;?e9)P!9uEdqxIzo{VZQVW-bfO@pf{Oqb}9qYWnT$0fKJgOKm3(xin zh1>Dw<7rb^y=p`f3r_m8u$6nyxhyO=|>7upEQ|0CN#XA+#*I)(@ zT+S;j>&4zfDmc$PTX6;zS732+dEOyfNS8;BmmvGU; z7N6g>!ZQ&fx~Ly727tmSv1o0Qw_wN)>4UkrbGqwz5iRvs>J{z!zXI2)YN4={2E46- z0kPF+SXJft@8NNBi%K_*8q&X{M_266Dpt_Ce_$zzK1h4DM2|8tKCHzT_Om9zZSsGZ zd&(?(6Ub}M=u?yGP3caZck331U?+c`{d$>zBo9nw#YVnR`kCk{Sk&kDk6c&er8uTR zlVsNxc(MJFgCe{xPtnrI(1etWh44IzZH^;zm0iAnpqm$9oZN7aY@)t$mK(mFQ>K^% zvd=&GMA<~uSF&67!zvFpAcTeAI&O)T0R7t_Go(@Z)(~|=XC=G=g6WYcw!ucAzpQfe z32ZIBnTKVtLcE5pjzKH$PntFMDNl*b#|<+5Mu`fc*o1#W8u~!Y0q@ft_Bwt4W{v}{ z6OhT_&dcAG4^?gX*4vJl{sE`!UwWsltnJAeGVza7b_JVvKJ;C73jb9h(PXK#TbCL* zqoOW)O0YoeHfedn9TXqn&e9t=zvT_PmVeh8B_3e0F((}%1XS9rDy>cU&bQ`+2Dvke z7Ko<*J@x`ntW=T23ob!tE2p z{Tbj9lwhQXjuZ=sAH?}~znAi&k3&BEZw|-f%N|pDZQ8X@2|0g$u$S)bQ^j=ccc{c; z`Ru7LUHx?_%DV`k{()!rPk@GpI@P)dWo#g{IijIIMZkVYelI~-fGmJNc(s%$&fKOVQE**_=en929YR@NxFT$lL$(Z2tRy{`V2@%e>1j(pN6+ z(j{;yjcLK;_|6kvWEjjwB^ogf^j>@?hj27Nif;U?4gBvX{EtVLc`rfbX%$&s7GQ!K z`-B^$|8qqfFE>qy+RaDG(0(9Sp-&ufhIZ?J?C|De695MO9uvLz?>G8?WCLlGzRURep=w@%5SgTGt5gYyu_(hF%vL&M$6^KF6rSQXj19`1PQx3X~CR2 zj`me+U;KPWl~kpTIdn>@B;SDK`>%~T_5DU5_gxKF{J+?H%c!`vZQnN#2p$L|xQ1ZC-Q9w_y9al755a?55rVtBy9Yvm!rcpZD|mD5 zbGNL$?>g^&dhfN{PHXjHQf*Q-XU#GC=%e?4|8?zrX*Evo?UA5hSa@3#ne^S*o7JHU z`BoQ}!E6uhF}>%D3BjEsPJQ;x0Ur|Qr+tfSCffyD9Oo5)%rNe(AZTU&GnIHu5V900 z8>wC%e5UzQCWVffk?$e|ZF(${oi|mzT|&;;vQnS`m}uQ>g5P~QZx0j9H$u+*lmjbg zN@ke!Y#q!}Vp?APr1`r4Hdm%oY0@DTlRCCbdBxnvAu7vWr5fdIf_~5CC|}sDmZlF|7KsbW(1cug zpG<&bXy?-z*~go4^mk?>D;%)^9n{Zb$9R+xS&yNtftY2Y^Ir09ZR* 
zGE8{?n`!i4vo&xGps^*u?Bk=E%QZ{bKg`$5Nn&WsLuPKRPky zFGzS~Uc6rof3cee5FSS1linJpbDN|a(C|C;Fk`mmpC{#XR>;6C4!FVwLE`(syb&;7eXpx6RX zajLAcoU~oFpg0vZR{> zCuXahmcoi4ka*=Ov6{)HoA>KAm7tALzsSw&lI4POKQ$WG2?vKEy<|OUNQ z+5KV?L(9Z;aiw998Y_T%6r;rZEw_A2ROIg_G?}nx^6xBw|FAay1+VNXwoIOx+BRMf zx~mwFEu({;Jupi6_7`c@lws2ccgf~T$SrMb`aN>YTG=gV82DK6EDC7OO0b(?yN52} zm4eIM0@Ds=to!sXE9Y6qtH|63Ggm91ue&dc8(zu%xExh5!J@}x_8-YZdb1hz*ES$- zA28W0YpCp&3)9;ofLAd!@@`bG)<`acozuksaV7Di+DOsB*Qp?Ql#Pm#blUX=T1Onz zam5!!cFSINBafcWBF>oT$@Dt29JCFQ+=% z<;^e{cqNdiYBEq5jWl~TD&9F;y4tnGw)fM}$s;43%We$a{9sF}`!VR%hz2zYob#ho zi6_8kH>)+jvJ#BY_=g|zA5*kJ`)}WO0ItxoE>We2+&j}y^Qt*z(Cf6B#_QNzjU;t% zB$cY7g`x0b5JhQq+GQQ}?$}@aLdcafx_7+W1RIQvcV4rrm8!G1x*XWJ9WC_}sZ6YZ zQ+4e3?K$QIoc@uz|N9;O^C{bm|Lv(BPTW}+SpK&2R;g7lhjmsdx03f~sWl&S7%kVY zUHHQ=&14n>&C1enEsmVFM;8sH*lQh48kC$ruh$tq@v+(rEw4*u@V6QFBVY7ekYw0* zK27QV_WxcgUJHHAo00v^2Gh2dwd>V?4Dr9u1B@ZXz_x}e7Pep=(S`zrjDy#;QoqZ5 z9J>YWyKdL-OzHcYjIWegj%b9( zw5A=oEzzvH%`W1z2w+UxwGy3=TL50%KX3ZaM>>DNz)`t26R6}JnyK|#>gFw;gN=89 z0UGnVU;@O>iu>fbgv*&Z)0Pj6Hgl$B^df-I)xZ%m1+3GLTXpT`^`9+c_}AY$pBG!C zg)MT#%-I1X4Y?y8*X24h863$mlfMS<1(quT0KZq6SMVr1X+Tqgfo;=l=xa6vVy6(L zI0>n4G%G)Sj{fR`YyMs%vz6+YYrKBuiYuzFN0shCR}JfB<` z>bJZqkV|`Ob5LYA*!pC~mSfZGw6oS5!?b8q8=WC&*^}Mv^o~}|35FUz;lCRfa=1A{E5Pjk`WahJIPFIfbMBLA-BE@2ku}is$?9I0v#U(AV~M?WCzwTrN`WPT zYI_=4xd5Gi1NS%3-f~><`O)Ob*}0Rn>aC;Ue`AppVd0CI6NIU*(h9>evIn4_0aWLf z({?Zma)%EfaM>vb8^ok7MqmwOAe^)=7wtp`5tWeTrgKOGJPkI+PNp88+m)g(uAI<> z+So4pbt|85V&*m0_YfQW zk@$cKdgtu%dmz(f;c{Xi^&;)BA+BAW^i^_MUTDtJots;E(n*=GcQq<>-&jO^W1lBE z>GV`Bu>`t#(*HqF#Q#zZ%{`5-9(O4NfI$z0&U}p{emW`;q9};L!b;2wq-#r?H67 z!&*=-+mtqAtxlyI2LIQS6hJPsnW_K33?&`_;j;jJgZ^^zZ(l<8=ZsNZ-))+&Ye^_g8t0CO~=Ij24a{d2f%Ju)*1L}wT zc!6BTkAlB~l>TiV4I?TJfHWhzWD|b@`?CMXCo8$RHE6ZEMU$1_!1q%B_4s#$Q!S{d zaZRdDK*pw4IXx_Qu3#W!0F_@XIZyxPJpO}o&Hg{=?f-Mp+lXT!Ot}BV%l<>r|4#$_ zRT(hl|MwJkMM}0Y!wMn5$&v+j{s7*#ue^!+pP%yYN1r}aS^lQ1^@H*xWMyT2{#EWL z5FcJ7BP5)L{O%)WVuaIwN80v)E!q42zlJh^GvCflXZc6@{y&`8f8Gjp+Y7V9h;hU* zF}ze+f;e)`Dd(<=`k!cVkDh zs^>5pRcFvb82n|E1WUkC#&UrTY!58_Y?I4U+tu)GXb|KK&1JY%wy^&9Ufk6CxS6Vi z{kf-*I-AI7(l{DeWG0T{)oy6?{|b_;)JO>Yfcrc`ha95xzxVN|n_)|6L=_9RKn5{% zl0x973Rh+GKRSPmU5HftyJh*WdH)>|ZrTSADX?5Pz?$EAf<-RP?6{fcJo9LwIKfu1 zwk%D+xgd?#LO%W^mgsL=Pa7gwfC0z3SZ{yw+U+p}=?|!&&S0Q(;=dY;DsjXj1w2ex zonRnUu>&&uBP3xbox)(-;&!y*#A1KU#|j`Anwo(wySc@x#osanJ*P}r-B;S0Mk&*o zKTnPW@a+cXT8n41pU;BcyD%i;zrxNmcyhw^F)_VPNJ$Zqse``$nBx6HD$WD@@%@)C zb9>)OzXxT1MgF8@^X0uV{63&@LxSn>++oRRA6?!){=|edG0Hfh0XgjBfD)1)HOILSvqVEgR~@_goH6pDgY@zRa1Al%_0R@ZtEKYmkV(lwGBsEm> z84I)LC+PVD3GE|h*lCt-tvMQvYE?j#pX4ObI^l%L@CA|~(Q5-bfRH{_%G0L}qN;sc(u&-_G_D0N$(6lBKJO>%A8q;{PV1wB!=Cg%% zNybK7SIB}m$m85GD;x=w;&t@~NjIhXCyZlQG8f{sS5*NjKAWO@gT_gR019iYfU*L} zza!VsX=28KkzT(|j8Z;*!)@wUJzwl*Y*+0BTe5n&wg*29rJTR!d9Ezz{>|VhZE;@) zx7Bn;&`Ib^gxAu$ID{Kvg{1((TbxF{tf!Q!lArN zG`-s|L>3c^Nk*sg1)H}#1P`aH+u7_M>psYs#|k`rskod7i)Qh;&47u7s3l@7m3gYr zsMi(Q`Gb#_YrJoWPl%M8G0*{yD2-Z)YO<;{bvo~j&fVUoW%aDdKOVy04nEnyDHqC< zZGYsQ-D~pVe*ZHxE!`ERI0a~JO!w9t6TEyTvWkz1iA^W@Ix`{Sjm&Cgygio>X zyKbwK=!*9yPV@BXwsecj zzG-ZF$iYmZ!5Z3ff4W>cU*SX;=USuf#SwipgC5VQN;JsjfMVr--~)*mw+#V@4JiOl zO^C_j;Z52M9LgYHi37^g_8W(?eJivc-=7JS_kW;!2-V-Ye-88v0y`7z~e!CRV=PuccL1K*>c4 zPHX4ZZ0*aaff!!{&*8T&R_|F@rZ6p)X=5Ao+r6aY--ntVHX#AYaqa-7l|}`|K*g2H zPEo_y@|6G>XdP7~4T2*gG3y>9>gN4%>aVGMYfhC9v$j7ox$|60lVML}JofLbs2B5w zYo(jKRD*(RZ?&VhKLx=3>+4tvcK>%_vSiZj3tp$w%*T2g`)_3Ne4Z>wXy~)x1Wtc* zB%Ayv(Jw;rR=J>s`BvZxiV62a!z-hWEt+%i zNYX5`J1jXsczwmco=k4>vkSjzw4SM0VFHx)G5&df5=I!Y3 zS2yY>DPD+_gl}LG-~TkdlHj6G7n&{8x*}M|5IZhdN4r!Ie9+Ct#QJGubO%qY5-Yf( zs4!wa6sI4CPVh01l2@=-qP5jjB=Y4=~CoCLvuCZE}iw 
zxsH^?Ra>v=(-7xs9HpGjhqO44TiqDN?3d&tnY`}uNOYTKK&Re&=Z64ftb1cH&Lt~D zIkF6gwkLVL-EV;b>W(~6utZ|_D0;RJi^>k;t$qa!8Qc<2gFRJrwx{-UVY6`X;MUD* z3QC)rt}RfVIJHQ5nQI`PB&ia7A3O3AT@`#Klk|~BE@|kgld;pO{Mbde?(-X&#K2Sm zn>h!qdavl?2~p|PqntKx(&V-IfK3B`=y-8QYj6}E>&O`1?`5<8^y~RT)+H~!0quDB ze|?z1@v#(a^SN0fvKo;Rng|LU6^+r(&bW@#=T)uY48cf3UXK-frB|GN_etS-_r362 z_cH|N6D6WITl`Ly26dZBquyB}upNGFNPLRz%+{z0^(Z_3_OF}U!x8H~|B_WWFq(3> z&SdmyIe&saXm1NV*fg|pJ0((#kZy9;g8&JGXc>z_k(9r74=KhQM?D2(6JuNmjIBCE zhjfXM($^8aYb1#=g^6cWzYhrp@dXe9owhC4s+~Pym#WZhH(zFLvai}11a$WKTD8fr z-LS75f8=0dfBSl+PE3c9Y$Q2-CE}uW29`cr51TX+d~8x(sDl)NA;M{g^R=VD71(wx-$tI-rboG~+Sw^TUQdqUa>o0E z@|Ex3N!#7JpI#!E_Vm#(i}drY7U!xKDeC_OapzJswY1NvdHWl_uQ4la@Y?Xa+-v+G zdXIX(n@u~NH-Wz3{gTXt#o{L&4x|3my1PW@Ma5uf#Aq@NIguLO6Q32?;aeKz5x{(y zjV9Bx*gcAs%J+YLm#30=6Ytr}``?a^kMD#9uU*Fb& zgaF21Sz`us{Pp~9?J@|R>{peL+tcISmCmkaWe>Zo_OWjBRwxb#r>#30(rdaI7>uy} zaDs!>=Ac{dev*>+X}!nm7vyHf)N}&0?0TDR$exVDpc`kq=spA}9k=$XO6cD?U$t7R2<9VrFA1_ue zKtD*d4=4yyT>YTbM?mvr9}rnXM7Zoh$+|KdOJ!E+w2eI#0@63LAhDhMF<&75Ys|lH zJ5_zP{ZyNR?&9BS?5?^T?0LTR%l@Z@3Ne4-bh}-n(0x$jold;`h6Q&6gDa`!RmO%$+WyD#d*Ej3eRmt*KIbx zxBT_-GVLA$2pxnv5#naM_{qZ~^?c*L`(E2yN;k_Q{a$zd~x*a#BR)Uc?* zd~<(wR30gCCMJ4!-RfD8-eQ2Svsz2Q@dRO>UL4cP2H+|a`@5zcKAz?G9mhf+dfeZ3 z;GAYfoRE6-c+>*0xV=N5rh{9enz1vTRzct{L>?W@u`LukH%_Jp=*C`O#IhU7ST@1_ z6uTBluiK;@QkGp-=oEQHBDefPyu6Aist(u2C(O9c*%qrT^h7&(k&X+UMrs@sk zqzDqFD`*%I!LE{Co$8XuD)|c8Ep)6@15qI=O&+f;C!6w2U(3^SlG_hgljCfaYC*Oi z=S!KHc8AjROJ*5J#ih3rFY%Oz^CQ7#6K*r%j2uPHw1cCWh?O(>m7C8;4>y^!)@h05 z-xQ$ZDIx`J&xNL=#2;JRd zkud^mqpqnEB0V)j33Adh4h^JI@hr{eKA)dG@9eGxtyU&EjdDeAtiIaE={94Mp>(c% zh)z$8A1mRH+8s8*G5f`tVfGAS4XXG5sEU?EXGQlO;3?;ZGY(~1T4(Z^w4fK|S1^a_ zs@V$X(dckjPJ5Q69v=gDZ(3q(bC`24?$X3U*!z2DR130|roK(ldHx}2JiSd>KC6k& z?Bfp7XIU5W$D}<(#Z;;9qRCqHkB{r&Cy)pNmp2L3nrq;!cOO+hQLVPQ{?=*5phdWgkA*V2V-} z!EgIB^}Et}%ld_dhYxeHBiR{f&L5^Qs7Witm??=Mtx(FRvE-`>TF)9Tzv|1{O=pW` zPMg_kw#XL^cT@K~BxS?#!;tLse@=mq#1}n8Mn_j^{uKs!jgI~siMwXk0n>cN^MO7P z{B4=kcwI6a2C_U6fkxT4>SY~*l`>tz(i7);yd(wkYl*M3UZs(EK00b$WYqf6lzuB; zS=(`cZtX5^oX7q(y-M^I<#gM;{it6G_Bauw@U1G?9LHQbnU3rvig?@QkIqt45W_Vu zjfzK|uvpxE7rA%S{3&`76n7Wl(q`K*hAW>XNoq=meYVspbhV;Kg~m~QqH^eZ7=YY- zpwO3x?GyvKWL0I`(D3m6%?ZH_%kuuf5H?F?nLDSn?Oya1mFz6XdpS=%Md(MhmP9p1 z(R&x>kTjpWbFDj!A`Y69x9%tSCrO+7sS$-W z@Dd;j4b0nN_?)tCCRnmqPHo|eyoGB4nh0xcWarvh(XXuEy&x+y=!B8?y>KUtVlpAQ z8BU-)sOIn;&jE1{yh6jrHL2kWZ^Y$viM#$_)5^9z7CFq z$f2Iih84E8d;c)G+xeNbSgbjUJ$@x}d@aUTdFK2KRddl~^oVQ1AHXH1W$xrnZ+Z7g z7qYs~Jn)lG=QSZ0wFh%og7DnlwJ{EK1Eyk?;p^H86NgccSS`fu;iO*2hol}`mv=9A&_oi*DIk0b&n=HcK@l>(m6 zVETJ|ETsgg_0U?W)g0HvFX!QeD<@xq0(dZ}t4`ZI;|VOaYFbSC0ACX?N6gTX3-;=(v$wqwZ8mK7wnHRSdzz5)kgW@AU?`Dn*$o zzLyG%vdmjOZDDclk>+;V3!15*c}dxVu2}G63=9!En|gQtsrGe}tXjKVHInOR;=$USKg&OG5<+4%v`5`Wc{@xA4Vy_? 
zhvMic$vTCaPMbJ9xWzHFxb`wzF4eT9U%2R6mwSbp%}-@n2{6`ATWU_@SDFbPPi~Wq zq&f~Ka+hP9!{TB6R&3Q1`cG19&7ap3I0`w)pj#b34y*m7*r=ZQP7RAFPg4$*% z58}f!fkjWy!!WDRZ~q8+p8vku#c{Mozxi{*oh{rek`o+F9n9HF_SO*dwQcVj44d&S z#OrxgE(7j8oS!UL(U&iXw6@u{#+6_hetM->zFGydZ-r2wt-*vJ`NTkg|5O$WlLkU%9dpE+|abe(R&(!9lkp7uLsqfX` zk%^Ki%>KuMD!cNH7;RC)4s6(;3>F%#ts_`)RlYc#1_hsHpt930(G-VmyPo+jXBALYQU55lrVr)kJAE7o| zCXZ)6z5rU*;WJ*}jKB!%>ugS3IOHSeh_TqUMmms>k696R|ny46Cx1%ZQ5sNaAqZuBT~hmPI0^F+lh&F-lA2*?uM+} z7uy9}0l(q`cLARisciS8M8fsE8QpQY`Huajgj6_m(mY$uquY$pY)HW5uO_ogcHz8* zru(XNoG;V6;++dF#XoMKP)vyiqc?I-Nnkt;vgaiL67lOi}m-Apuf#vqEI+S=+|C!%qZRQSI9} z)3hN*7Y`Ztyqd|O99=M0t}-da;R|~wA}m|C2obo}^!1bq?F|A}B`JDCLA4X|#HqM$ zMu@NMNB6@yt?$Y9HwSqbvqoo>Hy^0?VpBDAVyuTv( z6@DtQH@WUfONjsTZYITWdDlUvkz}d!n@hzREkbuXtp6~cfBG=2D zk>$lz3`eFkOVn*9hJmUfZfx>K6w-?hs5T0ZVKkB-YmU`{AQu0*iRhyptTX?&JJhK{zWQmnSGoSxHK0&N@_Q3DeSR7!8y&g-4C6#%#GOL&Zrc&>Tp4>Y46KE>U8WF=`J1D#((u4euxK=SkWcBa=)2_h(}v zLrA^5`<(TZ??$>6(Qn{Ue?a0i^nX9xzQg>c;Ez3C+dJ}QlPIdS8v1~qsu#P6B|U@! zI7D}ziF-C6jnfw);zdjbXO=)miR!*F6dXn#{tIJX#*oyV62Al0;u#*XcX0ZGVS=;M zfmL$k|2F;nVPiQyGUTf^(U>@^eJWYzh4tSbsS7^J7CgZ&ZGNa{Jx*0lSlJ=zmTDAj28(D4za&GNA=HWyZ>pT z-SnxlO%xetv_QleN+0^wEam#63+`bVreK`;ST;n6w}4TDQBsp^gT+jW_GK9Ow_L%* z+6u~W=EO2K+2riw_N~tOnT3o@{ldC>HKRpYe6%$YBaZtqG+>Ky&tv$tKEsNBihk`z z0N<$t*>F5aHyY;7I#lgs4j2eLq{P8ZHv~Z%3-bhB9a#CQ4l+4;2y=jK!nh3WANQ z&?zgTmm0mIMd9hH>3+JVk@<}o z580l^M3B0AjA4Y-cQm=2QYRd(_O%VKeGH*l1k0ze)SxSqC?LLnxM4N5=|o^~LvsH| zQD5;yHwKi|Wsert=w#8p?kVL!^_-rA4+&HXj+kbXqgJh-ru@ zms;3}jLvrUi-3;#;P7a%(b{oXM@zvIy`(DoG=NzUqhH_8GhhsQfYM%obg*7~?X2q2 z1vC)Qde#cx1D=c`wg^WDis31rk60)Jt9hUC(#uM80=sWl*!ong=6(l>*dz#eiB+bk z*8ctGg|-vjV_H8tB~KrN^fXoy+^y17Q(1aR76C%CYSqBPG)=;#HeUg>(sosja(oe+ zSPgfA3gn|($!K=Z2Q=37+>Cy+Qo<7Zr%3Q8(ip@%CF@eYq8=5z2g_~_Es|{*Gj?oVs;mjx%$QR}nTTrrCw}BkD|AIPIf!DszJZz2eaVN#DgF zH*-W1zc+^p+pKN;`xr)T4`)LWR6)1J5=w;=4gL33RYg_Gw5qsN&J2h)0HcHAVuyY* zXJ+!*ef6PhC1m2kyd2kKy$M-CfD==wyM&_wrNdyWA(V0Eg9l)(>l_j}73><##t5#j z$Mi-PyQ5~nc5N>)dMbRT+mi*XY>B*m56PEOOV?L=^7vNr24Q#3Y|mVub5@&PwMqs+ zwb$A?JWfm1n`pM+7gGE=Y~sRm`})xZJv4TTBUZ#Cv8~Bo>C-H?@bXcP-I0AZRb1aS z)KQyb)lfp)Pfg|Aj9Y1V{p$R9ddbs2i53S+cn?$n8)w316O;ufit`HsDTv&Qm%hn& zqqGthOCN-6f=^){0Bprl;we4SiV)z>ia`a{zm`OkI{5etMpV=3@e=({w%XSL|>#Mrs|ktzlI zLFk{an-_sa?k$%|@7ciT25y58wO-Lz*0WtTyDn|1uNdE$l(%y29^oJzOD% zo^RWpo!Ec3)*#Q#h%CX!C$`B?D$P_l-TYZ2el?>9lDgRCnI+<;_DT2Vie3u%i=Q`T8w=i*4_=1R|ysK+LENLjmqs@0OLEu7ekZL2Se*n$p>W~>J+eXguO%&*BA#o`Sv*wA#e@@MwlIwY1vzi|jEavlfcJ8x)xwU`n&5bdgKlV8b zVV{RWZ&0AfG6*L$n>lRu-8T&Fmwt$qL@GE(x3mXyRQK*j^PAVE7W)K4QTmIGhemZ>} zCfsXQyhZ=6@3@h@2n#902?d{uzfoMa2Qit-W$+dHV@Np89I&(K$c<&hDeU3Ep6ZJF{1>)*{r&3O>UsyXC! 
z@sG**P?Dk7t+LOiVOS*OLYvO0Z~C%~#+glW?nE!`;(z?I$0v#u|IF#yZd+S)!i6P2 zrvKC9aQ^qaTciKDE;uo-Vxgfsxg0g{eH(OLDW4E#)#* zhCr%`B`rw(&D3o4VB(kUL;4Vb!-X9vg`)x#l?{3+wVSe5sT+aq2Wy*f^^G{N={b>- zU&8(BD^uX_Qg?&-X6F=CH3&bh(s%8d5}pqu8~uVy^-ptDTq=_EOwi+h)+ONcO=p<1KZ;3JNgo;Ddq?weEj!%Gxxlym&Yr) zg>7)g2fMPW3}Y z5QX@jvurr(Xju%U@qU0IPSRWXQVD7x@m%9@5Jk}E~zMH|}D^G&1j8&sI&GXR> zrH(PRZcv+_Xt5EiEE~3x4VTi#nqO0exJ*mbyci?Ye2SBc>mCj7ixX(g__UtC@i@{9rs;xE!p%MN2Rx`ErwUQasl+Hvdv}ia~7-ZHkG1&QI zB@hyZnIQ!HoQpFqq3fv`d%SG=yI|aos>&sh>nZ7X!|eEeLQG@i5tfmrCJ~v|lMJu> zaohDscb{?ZAVI%Y6Q}Iyd(+?hzb*32Xv4WSAg*>Q>sj}g`<3EpP9LXr+Ii3+>`YNN zzvrq+K9#E5`dU?Z)t9s^0n!NW_Wfg-?e>T3(O@fMBK2k`if-6npfa7tiu1nB7)23Q z^Pk@sJ8qS)PZp_?az%)?$5X(YIxG8$ji`r!EC!U`S-PKQ6Ev068@_q91gxLU5H^u_ z>{gc{ex-tRbe*b7CdjeaChk)>i2fvU85RC(%jaLT30^|z*7w2U!W(WD+fEM6HmpcQsQ5M>kt|%PCRqGE`rV=@_!sBSL(*MSdX=)& zv*)9v+xlu>8?#l6&h+&5lzpfbm_62OEw7tv(=H3rcdLqt)*2mUV4^g23K&V1o_gIP^vL*hxF#fSfVsye)7(^yI+CvM#T*6%Ra|u@M2}Bw4MW2~t zaB#?TXZ4i)^*;%}^BVmE$L%Cp8DC7>au-^!b$t{A(m|rdg&J>F_r_2oq%i2?V7)3+ zu78v#=Dp1*SZAexw_aUp*(Qt`9p8VH=kdD?+WNVByO=BV%Z9~!!K#phH2F|Dr@x{^ z9HK|>=>*?r{-isoBJD!s-$@?SJAu+TdLrXKqES0rJcKg5&0Y^etsq!o#bYg~9_V%} zY3*3txEo3K2*WVf#6h@D#n14cC3G6yt#JQF1w<2!Z|G$(mfPHU&(7);%3MD%RbK)! z3NOrY`M$QJO#0H3oV>#;{v#7IOOx=V8~8B+n#j=HXKP;EchX4c>mv>9O2>5ul#klj z^+I)(>$V`jl6axDE-xe|*{5=yR{0fTehH9oMH)q!Vdm6b)$yygcd{PY3WCwsnWt9^ zRXfquJko^cJMgOcPM6yJh@NnZ9VUgv4D3nZ5MEj!w&E(7E~VqXeXNdO@y2M;5K+OD zlC`=atopt9*VJX+l>snxx5A_;yP#N6b>RNeZ}{-QmehARiz>Fm!f{o-NK?4+T$@|X+izyZyOjbAx%a`+9YiX6nK7*SFEV6N2A)iXkBKvl$@^&6Ym z8fZC^&bL|}ERVkR$G)aezKQ$u+!}M%SBtVrkc(Y967_x@{o5=c#ZOS8Q5lm|=M_yp zhA-ljc9PW-imY|#?u|h9o>9LoPXQ$m(Kw&VdTL8crfIR(y)psO44vdA2w8l8nDEAx zI4c^%@r-6Qqrvn-r0N&o^CqfzwCk#n2Y&sG zy=c_-_s&C;rddXq{f0SW4G%q9dVnc@c0t--s4UnTKl#$Dq)V;heJsJ$sz&^R%U+SP5U8K_ruAFZ{&|rZPW={%F#-L-`pTFN<}6?4dP1;iV-I zpP0SpJaw^}%`qk&uaU>35Hg~CgKE?1_p;o9*@M<6MB977?u}B8IY!WsYYz$8r0(xS z*XnFZ`h{bjoKU1=3y|l+$9u`?dsN75c_VR8y){4e^(0dP6D4FY_CaWjhn|vOpyAq; z+^ciCxBNB$g3Do3L_wMglxNeq+s|i6={VrBeg0m&QRn-I(%^2Y2dL3%FS+LFzB@cr ze39T`F^vy1aK81^){WmTqo~S)NtVxUrLEtDTLJG>naMs!bPextorl<~s9|et|JGPc z^L;J$xHiQ{JfmFgnTP(YQ!pg|cy=EwCbyuCbk??M=bH-%0vh_t$R%U6`MJJRIz;nd zD$>`MHsWp;CKl}sqrTiCKTKRc-g>HS+=M&@-K~x`7rC;p%}*8mN>HB>x_5#usQFIx z=p3lI3bv;6*Qs9@F@<4#8RP#Zegy+!ub8bi@opEunsP6hlpi=YC6pyNZWv8@HllDp zdHPXr@3yx7LS1bdUn^$>1rhsiLFfYXD$-Ot`T>p#e-1J z+g=|sPP>>6=?fLfGzFVck)b))=8ZYSmByvE+X+F(1r?7m4V0rm8q< z!_@ZEYwNY*aQ{SUIp6&C*xqmZ=w)k;SRQQ(p}#>Em!H%)E%$xSbA@L`BAcC>dktv*=Y1tjE~rXpqfZysKIFRVcZY_XZhx{F#>q5tbH?IBhw$cilEwA zIkHPqKGpMP3Ylj86-bp0^99VV7A}ia92}F@)A@I@*_WMd$gwlG`-Np4#Db1f3nZM^ zzDLW7HDZqKKZi>zUpGvfl*Wy|CMK+uY?lHFvZ;1*3;Hfd@~~liHSV$K9f)qh=99uM z7C$sf^?5K|P~|rYL4ve-UVM7EwoTHQSXBNb7a|8@(@%+B*kVV2dO9%H2JmdFL6gdT$B3HQ~(5z-#fhEqfvx7){9jrEt&MnE11m}rXnJ4+P+ zbV(cX;fsGHj-;TV?r1-lxeu(@N@O%liqS3F>(Q-NqNybC;1WjR@>C|U>K0oll~AC$ z>~&O6rhO|^RJBlVy+84`1N}IRtdC>>KFs@evp4~6gHWHtV=eGtN{63Z;1Otx05JQn ziI{tMc-npmRJTO(FqV3g=$kfGR{I+GazyQtj>P&|M5zgMxMBNr4q3H09lO2wI}2bj zQt$Ext&hcL=Zv3C!j}^hrsr%v-*r6Qk*4^o|nl)4WIYxb&6>xvjY1X&~`^F4| z=qY?k8~c%yIiDqKk7|hY?X$Q`?6K|jaG zWpk&3wukV?AAR^`zl|FS-f$-2Iw(>`J@F4TAA*f4O|I=ro$rhV85bC`<)3sa^dcNS zzJ}R3eR5_o-NALi7_tQa0qAPzDl69#Pf%gFdtQw&O1|Nt7cABQv^-A{~_`8w=R6Sm=Qd zHQPf+R|zQs?c}dE@?~(aCe)UN6~?btgt7cI`advIb{Vc94dFj7&V^xK-jrNIvqfNQD_;s8;tF#X~( zTq7O*ZL$-!zd-x)i2XAzG`VvIuioVTWlp|H;m>Qj_UHaX1$cc}P1VN;s@!%@Cg#`( z4GsHO-Y*d+rtd;=;8ENOf*Vmn-nF(rKZT$7#>%)Vw|lu>^$CX1DulTPyh8I6w+J3B zQnHr@0vi19!28b+=d^cQ32FMHF6#D`CQbyKOLdkgg?`@Udz4r~sMwL5Shsdx_woRl zk=8)lgI|;lB96p|ukpp040jybTj5~B*YuscnlNN-Ah0P@_9V=uDh=!lDqg@H3*%a#9^eO9CbC@j``xX6DxYJf 
zQ09&Kt(|AJw*pn*u>K04`;leEg9l}-->zODF2DsnaZp*wJ_>m9UG~bKU+l6d1BCO} zA1FqAY(v)xIc!26>z(pdI-oBYnby2vXXVbipJ{!VzlaXxx*RXrZ|BAq-YG|${YeZ< zDva^@@pQ=J2tg!s8*QA9^S%AZSuc(;A_YhuL^tpIjn;tj2e|M&GA8d`83jDeI8n>?s-3_b|-$RXUq}e!G`$B@9_#Ls4k{qML_6SQE4V z3M?&Mq9vnN;tfnp3IkLsf?IDJ0K?yp_9k{;k0jCFqq&ohkfQApGa1H#0{X-nN`<>{ z1^tZ6fbX4Ai>&OuK3XKnUFGSGHE2RuXn;^7MsAKEjAsh?O1z?t@6MtQe#B|LihG8h zl;WC11FD-FDX<-XmKa{%DqC>qOHORh-n$M*l|JdOm#25PhZpl!-F177Oo}jIgur37 zL}7hF_8fck(oitg?SV?ev=N;UZ~QBUL_v;NC$jCg}OuEUTh z?=?|&3i$z#-U&LN=+k_a5|67_w^U+xGg+!p56+`5nm$)0ZB94v+08c$nt zzOi6jd*t4qqYu~rv^zvV%*}mP1Ihljt$BqLvC+aGCI<|i8>jU_TxaTZ}h&SIiT>g)T*(|^shpqAI4dP7&r1tgEW}Amw z^H-K3)(vF+Z@1u`t8hMz+8aS-=&!v}aWS#y)$MEBo9E8cdivx)B|`)ABwm0B5xLzi z*F3K{6E0SZRw~|^#S(H^l*??C)18wJ94@$>S46UKaf*Z=E&48$QU^P|8S#drdwtAX z`eg3>+!!nfc6?f5pT9qG{K^s_BQp&o{5M&AB*>hlP8Ic)xM;C=pV~ zar|L9YZp!gElJ6jr{cIXi#Hd{dgekL0{c;=C2^@xjLTzI$n>HgqX5YWBfK72@qH+B4QsnXv&Kzg)KK@8|5nO8xBq*WVbbw*7Tudz z0Nm`f`)#Hae0%9BzWj%|Jnd-~c>4VHbq<13j#lLCCuxjVm8CtQD5V(j;YkR)cG&|Y zM2x_$8jg129inH4U%>_>=uC?~Z{Ru1%h9m$zU=;-f31S?v}YuVwb0A&WD>T$mBpPE zjf@}?hqjAV>#7x>U4D)>mY?XMiZiFweB2+;;5ujl}7uaxyQ0kT&`k$XWH~djUskv48@!1 zzHPrrX$E3NXJUr)C!_F!Y?y%VUB+;)YF+AP!^7-fQ!W2`tdHanjaTXMZ^@W{J>x@M zBH~h$qq}G=6@^<{a-Vp>jaLs5BQh(JyUiRc5C>(1TcSTd%IB*6&#V389U&|(H*+ef zxGsL-zx}6q7>i7kNiPUc{x)IU?rrX~*ss6>oMb?krRU`N=}*7yKdz%1YtGd$G-Qho z$6Gj=G*C?b#SIU=5hkG7?;*sw{tEZ03oQ@@hheB7DzH9!~9eR=McN8Uq~SfJTsY=2|tY=B!!&U&RIb_4h+_*lzgaa#yIm|$AAmgFv`?~trJlRYEQ{AR5?YE~h z9}NasX76<`8>MbTCvWC=tYkfb?Q`<;k!qFK5q147orZH1agRj9NLX-i)R>7>Jor6G z{nm1!oe&Zhy)I(;D2>Zq#bhic&p*ZRxT>^9D!IBpjOR{b|Gjz1`)WV0v0I0x!@0I; zQrBgZ;Ux7U8e0JADWT#HK#CP+N^QNDftuzR3FRX3?V@Qi$%K+7OK1su-qhnO?qSlu zv!8znJ%b5ehlt2By8T!_X5cZ|*loLLp7tQhI=^HYacHGUWEe2Vb?8CD$-I` zpa#p9-y|RO+cAKT-kf+NtC2mNq0P0Gd1bNnaBKfMG8W2W7Cjgf07xb{i?$NYmh0N@ zFB&_Rt$ICmNDQUW1@RNYO5o&h>sE=~k1jj#s7B%x`!1L-u0zsgFEu$zKB?D#d|IxM zIS)dQx-XuMt1+J%v>(wJDnKb81T?Ql(*}q22S1fT93KON{1+VYqcxDGWABff$AnEJ&2Qg0`_8lJ5@(y=NW8ks~(Q3=d8`e9mH(2Py=h+LF4--+ycN7CCXI>`7B1kxCyZC)yQn+C7(oYq>O_&t2xU={y$4xcH7auz`n4l#wRK9&98{Qhh$qp;mt)A3o zO~2kfX_r8+eKW+G$Y_F3BW$eI=q3Wk0w6Z7)hV@Q8&Yp1;pNIW*$_udU9n$;39WRm2k zYCh>7Y=UCQQKRk<;0jQL*;X7B{baCjXC(@|V6$5AO3RTymN9+MzAtixfBy1z{` zu>MN+_o4S+ew%O)83+WgdYM01I4y@M=F7MpjWkx9)-uL&eWFVkR`)HH(>0aJJlB|= zl`1h*Eb|cA)>rI;EwA%h{*+mSNvoxlQvZo!AU40^4zkkxP87|fXiV^5!TIkmqJF{v z+L2czX~0ge3A5g6<#Hhv@UrQdf-nS(F2h*O*pk|cqmHR0ru43sH)b^RE99tvDw@vS z>8R$s^(aRsb^+C1io<0E0x~zYShXyP)arLen(@!q{a9N(OX?XI=f;C+y(~_EUUxV= zKh-oBFPT`fu{UX2as9Y#i*86RI7bZD`xGBqktdJb7FQSrS`Ru7D409W_ z>XiT}W{18Ib=6=>4=5OK6A%bxFWW-R{b#@Q3Wikf^+aU_su|QtfYs4rv#73#n3-C* zoZikI3$=Cf@{#T&+`wJiUXaE-g!MIN$gB#0Q{5;9yTH%COcp(PvBL?o* z0Ji>JY0vJ?Wy1G_l=(bxrLSB!fxa?g9VM1Tl%9q`*UZa&F)NHP4*PQt!B9y{!@_*C zd;av#&9Cncu8gCin3bsuRoi`*oL4rSV9n)P5?5Ov2RN zr$Cu4m#o?vNbAfNhzmiA#4e%mt1F%@AalJ4kXTI&8tj3?beLl$89bb?RdhOXaDmL4 ziez(97>q9`t+!rHVAm!13?nIn*$e(}OUXaqr_)*QLzIkMgg_aDwqpqkQra4$rqG&)er&w)=YE()L%Hjy?n_8hn8= zb06qD*$PCB(Zgmo`I*7ofEdh{Ir7k7xXqF;v%hIETQ)F~8ZF~`4Xy=8`^j+3uG#w_ zZ!^Vexor<%VEo3&PIb{57&If6BId1;c*HYTD%T!>Y$#8hn8v`4+w)MdZ@zc}ZAT0_ zP#006m2j~;dN-YZ$YD3=xxczMMG=2$>nsxb&0&wsJp$Jb&=HkCB1+(ps+Ma{3&!YT z_I3W}Z-+_YH<&>Fz$(g{MUI*m&C$r>(AI}e<^?{eU=cAWXBBejNGxRJB#}}&_ z&~^R&mgc{{CF&1{!GphuDE@uo zh*i8S{^zpv`+gHgzCgsh1Du?=rLZf{f8GDTCjXx=U=m(tBG>yvtly4GfdmMQg3G@F=miDh{bt62p^ANa8C5OH_SK)F@ZUXGQnl9r`QmWA!tl4F5(khA$vJG< zE%?8gPJ})Xk}uVPc<-FTe>y3&H1Z zzkRK!v@fyC$!w14x1-AW2#o6g8SDRLW7P`W0o*e^759JMjRAB0G~m_loMFDOes6%h zaJOUPTl|>6n*x8FBR7Iy><=pyvl%x#DVdoS%CwuQm9DgYoJRe|j=7P4q;c-dF!jk? 
z$PfmA4&7NW*%7TX-0crtr++iCkTZecgO=$q&-OcK@d{=RaBP(wYxKYW&+eC0#hG}b zObOKmP5*iQ@MFT+si!)&KuJResn-Ugz)7IbzX*3b_(=12OXT8`1*5 zambY@4UPv1!opgK{r&xOp(_`+O88nMnBUw2;ERIMfDvv$kl_4wNkZr|2gJFMbQH-yI;OwB=1&85 zw`det^U(hdk01k#>o8o+_cyK#h?pDxK&1cB(f{$c(Nm1j2^g1wX*cSB<7rU+Zo)!%7YHfX^ zzyAG$|6?5CKYSV2Xz=e+5iePbB{#lWC;H#Na{fuO zSF+fl%;L@-uEvP38~SX^r%=gXR)zLjb2R^ZtMYLn{Bmmi9<>1z`cy}@;rlSKCvyJ0 z(P|n`t)g_sqBz(F4z=|wn&ixjH{&1qs3*zSKz1W>IGK4M0B+a#BBAuzSk=Yq-@d`; zIqKC+TvqgtR{npS8yUYOS3bYw@d0Y&6c2Fnnsm5SJ7ic+tx`fLARxf$Jv*vxcR9xt zX>2%z#rxpcc+z&iHJ;Tqxp#|cKr{LPA^7d%SE$Pd%as;&Kp%Bbsd{}6U?Zoh9E(9A zzoMc%P%$v_7BvALTI3P4AH*R9vq?XpbShi4P!Q74Mvvs7`dOQ9*=KA3uoq`E8lz4R zv=v&cvr7E>{8S8DPWSj#?58|WYq!~p;JPu8zl^T0@dX0lKT;VF#YF?=r@LtB92gttBksGXx$E+lQV-;M#81@l0)&bu*byYp_CdP}3B6y?xF%44<@6Qi8N zm1@xl0%zS>OEUHlKpQqOi;j_?TkEI=z=Ft%Rm!gqnwH*MkU>J3Mg!oaK|s4~l6zZ) zX2zqtV1oUp2MfHV|6>Z{PvP|!k@#J69Ocx%FpSdVyk!?w%S~hqSNkNW4fKZT;2ZFC zw#Wrnz|j~`)O!56e8saBmIZj^Kt;oTo13_-gIm+&Y*1Ssg>tm+GgBYX#gukR{0H)n zN3}K&4o{bzL-kwt?NxG`mAd~0Z_EKobb2{XKl}5)x1Egpkjfoti%PBr0rWZ|;%RE9 z1X`8VOb*Lrt}qOY@jVltv_5Bv1I$dNif{1v+;Znz1DkB#fbN0Wqg#x_`>93Ii^q|m z#&T+qYUJwGSlf5e$U)8TABSYFnElEV_`JQa?-r2jJps;WQ#c;G4$!GyRX5e18fxO< ztPe+mb!*1&bNAk_+i5FTse?TbQ&?SmkGmL%fFlN= zq;r58FPUxBPB3l>a%tT1W@bRKdh4=40h6T(c9o@GO+E>fS4KJ#k#c8uqq(HF%$4SA zW|aVvkMltqzpIAiK*F*P#so%}O^@q6c=P$9Qdl8pf&AM$I1og4q^%)e)EI9nU8W~5 z*G$n1EDmT*0egoea|9oqSZL<_ckkZiK3= zT;KeXeroK13$$8qFY0XFPg+It@y%;UP zvP|-Cv~Q2k&+wYLGoQ(>U0YeIrnuvKqSIu{ z`G}}J8Wrt@&l1O=I@7drQ?e_$Nc(RYi8FCR)wXbjtWUvDg&M{D9vQ%c_5{SU95p8; zcg?C>zkDvd!kB-Z0*J%Q)sfjU;eNhi#N{iCHJE32V43uE&vd%rH@QEu-~*oB%E5RG zw9WzP95Aw&qnb@*C#y^{&2zYqVuEQKZT8BluMfWd^d(;GN5HF=dfpT^?Es|v4l;^k zqb#dDWkP-M0vs7S)C#i8$my4B;=Ut9QI@<^JTr~TU^6rIGqj~ zJkAHC_W0I}k1TzKTlBuv8XmI!+6GH_Rpoh)xHn!|eYo1bO6|*P!pXZTg3JjY$ZAFi z-B=wQd{DH+da#=YTU>t#+8$KQ4?@>LT45P;j?hj+L_nCxZ?M}Ag~$D%<~~FSHXGnR zT9O*eVQ=EBZJU~)0b|F=-R^v}LT4R`|9xiTtmR{pIqA4t9&i1>uN2RO{zNI1sra=x zQuOp1n^!MHgJ$;3*_t1rQX!iRs1ty3`Nass`e_TA2RqX0sd0nhh4aef#_hEP^~sJV zpajsq$VN3`{D9C{X>r~5*JyA7%0SBI@35><0A3-Re<4ia%Q@&78U;YwtYvoG@3R^r zVKN=X^>wrkEeNRsn1y)H-p@YZ%=#N|If_<1>TtMOsg)=hfD`ly&%q?042-?25@6P9?narGT&L7zfeT_5R5%e@SU0nqEn z>wyiHf=nhaoo3zVXxmK+4ab)Vo}S$jDV3BmoXn!06#!It>V!M0;j8LL6PiIlK^=(q zl6-1?eQ7(I_T1G2DT>FB=2-Ol34(##oIrOt9w6Bb6z zn{s}5v{>s1EP9uVMcppPTI*F6pphK4>-QPoAKe>3s5IUwku`ijt69n*pJSuXQGyZR z{p?pcS#9dv;qs>VO!z~#pLNrH#KG} zO88zk`U7zTl~#QK$8ehUG1oP@PP@Ks>G_(oArRmfiydho!OoC0owOO*9WQa*?q(u< z@=O?*G40jqTlU(_&x~=q!UgSoBPxd_@78xbn29!h$5#rf#8-I~JrGalbR;G1Mo9&2 z6)G}s#;WtW&%dSD>eo=*0T8_SLJB~YH#ulM*Jtd*@$6jZ)A9hzPt}N0CFo+PpRFmJ zwT8aH^&c6Yh%v!fs!N`!RMc+Ym&tS#p)}C2w~qio~gnDKt@?*_lCy z=bPeUUQA@$hf4PN-Qfq<)nC+EM2HhTik{pgg#CJ2s!y$hmP{qee)q6ric6+WOaqpX z@xi=8QV$wDnS_KL9%p*+H5t5M-hWA;cH{UM4IIY;dL{8*Df26i>qBm`)scBZ?fdDt z8iB&Cg=rk78N9tm4LrP850`~Q%=QMuu3?=$_F{g$WMuX>8jaYlk`Z`~-)7R)8Ft5X zzS`n(*ySY_)xwEIqjHdW1}fR@Z8*+XYhSI0A$&R>L}`7sUfXn8T6l20YOhgltWYPx zd*y5nn|%J18AInD_0v*KYxlp;9#kCZE3n}4+R7eQ_H4CTPYFeh)@o|k(p1P5w<;1# zC^65CYqqGj7+zY}z{$z+2jXthkm9D9VZv`6{yYHP!C&6f&8` z@@uTA6bP*LCu?3*aS>hcdt;j2c+N=lspVFOvlJ=i@UJ{d?ZJqufrc&%9vl|w+*3s@ zTMTQ^L2MT15*&^jRdIz$6yKPbcGuZ#UvlF7eKgf6l#i!fp!5}TeBKA?`c^unf+(4G z*Mq#p2Rmv9*iOCe(W8gDDys0yO2buW*m7U?z%zUAcsdUlXizf1-ERPFjR>YK)B2(U`Y$0 z2L*7Doii*Zm)s2Jx7b~e)n9}q96vSvnLCm8uTGw;yd<#=RDq8fWp8H$!^FqWL$Ws- zTPGcExwzJRT~5|4(A|l&^6F%{ITH;>Dx&>r2AYDtP}%tOWT`Qh`_X}fEYu-%!rT4+ z_*yc7k{+A+0|w{w{!sZc`dP}G4qDX+T;G8eEr}bm`RqmyUiXMu&g)^IRG_~f#roIX zwL`GWNPO_=oMEKI0o&1bAgWThvK&a;$#L{SBCeo>{F|+n;rul3Ikjq8Nw3K_6BFHj zJY$p{=mSCiaC>bd919)~PK&@Eh@)&oVeU(GxQKQD9%awjdgc6i{KOX$udL1*9_O8* 
zt-aLnMt)jTW?##F+f*a7@gOOI$Eg)O5}5sR1BLtbVd;suy9hMQRK9dAd8@PzKnjHf z-56dS6wAqKr?&{p_0Ha})rsn;zlb7aoGirUx z9xe2zN*ER&UZKZzwY<;ok)}*NygIwON}_bhmsjs`UL!5|0D>d=(g5H5h%P%}7|NXw z^mQ%~*qVScAwe}9U_bQ9rep_T=PY|aKN8D#!b3=sLTfL|N>iYhe zA^GABq4P^o=$tC+RbYh*4e!EtkbRJmg10;A6j7Oj?~!U9?%t(*@9@M0N!@4j+0sv! zS$EYWEjDa>Ts0A>@=sA+_hag=FSJ2!9SoKPGK(ms5`5~Op`AET;A2sbZyZ&u_)LL) zN-0&Q3Jh%Hg>lt+rlq0)XmLyEhB99m_YynXukt<6ZkX#*zoUt=m${${n>jDqtLtEJ z7=~uMKMPK!IS)SR7q76hx<@NwjeL_JzWGd6jJb^URPhA{P5Q?deOZX!(00F%*+&^j zOQVdZh1C1u)fhE!oI>< zYQxJ3vs$*1dRI?dzQc_-t5K|@uU_YnpVBNr=lG6O=UAx2n2%G3&7ab+mrQY|Mk5Kv zNJ@&YQQ89nYmgQ!0-HQ3UnWCOx=i0^o!9fePg9miY7NTz;<$YZ3TL6KbLnG&pbOEZX}nwW@@2kE zdQiM`Z>qu`>KR3Ythk8dHEAobBz~sBB6+0e*8|N|BsU!cfR=0*NyQSYm1eFagLO zLG-;=XF*9yq95KFjl(M|+w{(us15$R+xGMS_&GVY?B~93E2ommct3$^yoT<3Zwlb! ze3{{Qxn0jw^BQ#k8e1w}AJy4-Ff%k-z(JLM*T)c*6>UZjQ3}}%vy|0y)NJ@N2JRHb ziO-m)YvKl&`mK1h?|UWT7S~}g6FX9mDtBamjiGK&$EjgJ_W?ZYgI#zvoGxdWu?*{W z?axJ*7bN5Niwo-L;go*v2xC7;ykMcOc|GF0LjBdH4|fff7hrG7lI{mxzUWsEBnF&! zfZM`-xmig**gJxRyxbf)b?*x1C#OxRuWQ@KVf|zlqIVA>uxU*6JQ_Os%Tj*8kNZy06WhP)uL>W*Pw)Y zS2KwAJ!X1E5zJG!{WuAYWsxXNxJz>P)U^>~+v5z!05Sr`5BaCNnGnEq;_Hj2fbplo zx-*+`1FAVaIz^;>p^*2VwC&aE5w^Gq9cnxlTxF08*-AOGchqq0v9M;Vc`SZ; zfj@!)jKX!9NYXTLVeC_Gdujz@lC+Eu=J83&<>E}`LBR@37-PK&x!6RT z;Zy@OOH}ST$9`ikeR!lU;|7st1t)HOIAGtsug~nc;6Af>iwxm*z5jjnT6!2QaP3PH zs26BWPZCSP(VbYNT=eTq&j9Z_A*F&9t?*Rhi?c5i>7thw8Aq>>?b;VFptXfq^?(Mk zmcp#nm@C4zf1t%onf*LyJ%}I%Q<;)h?P`D8XyFpODV%*s`n8GFptCXuEb_adxgY|s zZx^WihpxXmplZIn*Y`E-Y|KD)U6Uub|ut!!C!S_h` z@Q%D^og+KIr}BGlpM9&xaVWB&8PYw_bb{_$O;oKY(nLyB9#jm6@Adh?Ikx4YO0mJG z9-{0klhjh3`F&g%Xo5Yg?WZrIOIIxTYj}8&a$AzPTkr?N-U#+(v-0rze5sj(F`n7w zCU)zqg|!vIHuzW}aHt=T!S|>$(gEWvtKiVk$ZLAg&O4)6MDbA%cm|P;&f|5C2Pz4w z13+3R&UE-<2K02~yb^z_>#=j{p{Sp;mJ6PD*7$5&VjvP`kkymtbd*^qNVBe^c{%-> zhd0=`LFUP2M}Ci!7K`0hcwRLuIW5X;&g_02`1=wK8E$h)aaO zp_n6iSNf<4<^)0@%_;>jK*jJYwGlBM0#^cRsHL3@uicG zxB^`Mo$sB*mQO;=k%zxpu2y!vebgY9(h5QCfg~C~R$k=_9%h8Tvu(Y)(I$;#w;eF}5KiGO>x2$XqY#V5*>2X( zc8%zp1%6>y=T2)%>N8Tm>tFnh4goC*$X^@{EmR4{*P96me@MhTKWPV!Tf}~RDCvsP zU+f=<6nnRg2M$M*A_U<1X}77jr}ew#pw3J>$1}GEr_DS|)~$I(uPj9`~7jbsf;b)ulG_ZkldF4-jV41Ax?Tqe%mP`ZJpvY*tJC z3lICnB!It*???LL!OK`-Ua;~lFjom}xc3mBpa7gkYOvGYWR`vvEjf>XwqpgFq}p_( z(i*mb`_oUX0|aaTBER6Bp}Hdf6=b8+(i zkRk#6AXOxd)XIWoaY_x_o2+rH&iM{TCQjauMl6eKJ%0DtZz7SU-gH@AEzl~u8XacC z-w9`6v6yU`!NAzM1Zyka3c{BUST_cK=<@Pj?5>rFPq+5?spR{;QocO0P8j>A&-$(| zEn{k~&ZAosZsz?eF9)IkJnmPbr72Msz8MhJ@$}n$>1cn(Emr`N_{;uip;l#<%%GD= z105e0SOm7~<|})pa9M`*F!6%w(|a!W2*`^l{S0$uPacp?ZZCF^fPnBt{-Bgw?wJSq?Z$b{9U7F@&%+ns?Lx0T19|83YB8iODA$*RnQvAc zT=HVK$=+q2*%ACHuMcQ_L|bbUI^Lhg6PUD!?96<~+Xn#r~l zp+hDDjrM!34itb%Ze96EUdr2e5>hvMzvES7o}mPSFha4H)97>uT3;~pT%GG96K#$0 z!2W~g%+E%KU8(z_b3JCj%pPtJc*$%yTJ4uW_PU_ml!lY8s~nLZ98>W%y`Hn-7_n-K zD=8)9KAal*DX4c_BqZ7t-O|r+?GF+ZZ~ELiUPC%KPA7@mmi)l^R%L0R&MwC{S{!2Q zQjqlFArD_tDtx|+f4)BXya%pwnU6_79ELxbxmOeF$bwS8cg`#RbkUPrOeWIMK#)#YQo`V%2AJJh<13-65OP5RV3U8VH-2 z4>OBR+TZx3tPot*=URPUA3+@vk@Cx7y{~9r0Typ%=V21?__JF@JbhrOhN|+`_n0cy zkzI@6?MG*Dt5_f0nW5p1>oi*@tjlH-_jT;q{VR)c?c0q*2g?1}8ld!mWkLY7Ch~|AOOz)$M}tqTTKqR_|Kn#Sid^dKN-@!ncInCE=o&HBy-49O z>0-9|%Ev){8`kY;9;lOu3xW_SUbCgEgkKGWrAfXeTZ-!^tKn|idoQ{|Zb)BFcit{o zjJNR(vhZc(_K1M)x-~%8OVC>`$W&)%CN6KJ?`=Mow8*S1q`LX94wbj8t<4*Ifxd;? z1PBEp#u|noQ;#m+80*8fO?oNQW z!NP-gpB4;VMcl*X2pdhD7LY`tJ9%eMOIscAp>h5|nuGZ~ zg=)W%$3g_1n1Q1!dHw{<9moClFj*4l>Eo$z(MGJo>R(9)$F6yBM&;5(ezq5M4}k=n zsdm`Yl|<`xo)~ZIGpcM8cXU$;`>c2AqPTD^$@q=y)5_upQR^tSvVJb9Oi}9TOVzJJ zKr17MNU3-7n(cELo?8T3!!2mwF}p-5Un@ZyozHIRc0erM*xK47As|c#bvdRoZF*0B z7iFh|PzKo00ffch2!17*>7G9#oi95}YH<^Bf1T3#2Q~0=WWzt1_F3K4n|t991&$>wq4#TQWkBiy{yrdIw!kB}jr7n? 
ztsvIdND>dX@U#D5^9vbcp|i8IhS3jQYK{0Owm(#}X~6f~-OOa=UyO5&)uZeJhjdJp zhZziHZJ_6dGqmlmFostbkh68pkR&2(8Dy`ZOS=a%E6DDR;u5J#dkx5FybL9ZgbvH$S}nT*ls;i;CNm{r+5 zfGw+Nt{We3$xHes34L%nGoQu1M=tA1q0=7L8|mHk4z0!G&84^evEurO4TFsPv$pyC zzW6~}Ng(Z?w(mearTgptM}T!Coy_6rn4NwwO_~28hD}?-6W1mG#t@n&VLXA?@Kbw} zRC=%TKu-Ch zk-8MNVP8kn>^xzfCdu+o?ZdR~K}N!a>+QJ%(4*-e(4{nhq_JE;^8y`-uKRL|OPO|Y z!l3Edw};a%*?LR0q+zxMm4HIu?eKwg{e4fmsREe&?i&8i7JY?rU6ysH2XpVPUh|3B z)&9Dp#o7;wO-fXFbx>%T&^R-a+pFe_JK}p&AV`{r#PTODCpQ)L4$rfzY-4{MI#!g+2CglC) zA30XLE9%dbB2FG7xS(nn-_W#nSm#WM>jmIZeEW}(G3bLfyGv)9?V{aOfX$X23YAgcwED4WQ`uqzP3s3%nS zVIlMDU&?@Dc)>`m$6^`6?r?N!phRmq10Y;QP`d=}I0b{VPNpb`uZY$P!R4XZystSq zvIH|+nE~kE=tk1eMGX(HBj%`fe0Vahr`5cTl*&Uxmm&MEGB36+M!#!PTig9PUk8Qt z6RkqKxGj|;Q~9Wc-)$Vm(aCft{P`oP(kOxb5`W@eyp6w>u+vahXUBAzuJDsm#{2iE zjKE2oo#|io-!!T)8MgAV=#=PgmTPd zdli7b07LiY`#bt~nGx_Do+aU3!Mr4LV_jk3B^8!l9OZ75ova zuc7jLJpa@n8PwPEGt!-F&wC^@SxDUPO@n>acVq1>_+`!~I_GoWM=r0`t9Sc%Gsi#+ z*DCC!qGFN}>3*Gv32^zUeZ4lz`2T7nP& zOg?+1PGmdd-G@`M$RpHjRN}%jyDodgRcU96@=zFXl~N~t%t$=ww<8+yXj^TKwwrm0-@DuIx+~#OHGZI=WSJGF zozD-m(x(g1&X)>?X}94na*-NKsLE#Yk8awMH7S*;%Mn=I#^6=i&C{zhnzp7COHh$0 z@nWa3x0qHLiJNLeCHgGy#OF7u)_1e`GYaY8&UNiCeAaTGBgtIk7RIL+gTA?@gNyN5 zDebyh9hNOgx=CW}+=@$n&ny*A5j=XUPmQMMs@vM6!Vp}Iv}lqlRjim_SNkY~j)>^Fgjk6^<+hI;W`gKRE=F`lPIwJ3rOstak2 z@W1RGq?E<;rpqe-%3*bTzAYtxeQ4XAAd*C4Xf2bmK|Gk`COZU;UId4Mii&!`72)P_ zqKm_!rQ>bsFJlwYbhGr_-{o`vHj|sFa}o@_&IULWoIEZx?e>^GOaC;PgMx_mHV{{) zH-)kiZA{2o<$Dv$Y`=AT+2zb|QREoQd!`RSxT=%cVnfH6ZvmaU6PB485jbXIYt{}@ zZ3*RsgoaM05*lGpq2>)`cyV8@fva2r#TX@a25(Q@jn3-BF8j7N?D%A0oSUP`wh*Q}QNoIND9)3v2mx!v%$td8j1vi3%)#?|W`8 zXzkuZaBs4{?z*O+#IF95U`0(K*U6tDtmz#cR~@IIEuC)F@;(%Y>Vjhtb&uq34CmL- z!LUm`*Bp_4?p@O@%CL{-=VIc8^7R!M`cYkw_Lz(i1C3h( z$dfqW5BhiTHtfqS!pTc2^tj=B%P{Tgx7KTWKg%SiZybg^W2nG(4+WWdI<{;y{N5m- z_{;fpa2bUU@))xF$NCbP*WCN_34Uvwod=0<=(fofX07nolu%%y)sGPwdtG@VYwlH^fvhVEhaQqco)H-!qe5D7ItNCzKzA+B^HN4ZA zfj^-3n^ug~ECYI*`uDMS9^tK$MCjV-=?jV{AE8sLL@7`kr7}5BjQ^zgiMlb)5f23! 
zxOH_6qiG*5MXfr-W!+8&0Drybm-lO58XWJfXb|^(T?eb&U+WNZ(+o{|eyf+8w+Nz^ z+!>y1QkE=6x`#NK!J0sW&-Bdmo9=z9QR^WHG=8CCRm#X$Dng*;1$ld+BOb zUv;pi16qCP$6xFWSj3+=fNh`%26YgswQX#>&lkuGN!oPUPi1PoKXVQ2P5gcv$vsLU z^v3jttE3O~)Z11+6?J0j9F82a+qtMn?hYRksHJk_`KqgtPU|Lqh>=fmy$|SIBXuAE z(RYFcZQJYhU|UE7oISTjxDFKnHwmV>KxrHn<-cPSwKW;ql$dvvQIGc zpfW+8 z|D%iwWlzE1FynkH1p3OZ+z=O;D}_g0N5jcxxE#U`A7>Uf;ldQ}A{)6Fx$bj+1JqVl z-;jpk?NMJks^`&Y+lL_g&$Er8ta%9D1WKE2w9lXkK8+8Zfg2JHJ{0I*A@~We!@(ON z@W7<#+qf^44}2n#ndvfJmfr1##O9ZrH!J3gJ>FgAq@COGquv1hhnaU@;YAVZ9B6VU<#H zK3`!8UAd>RD-usMa02RAXh!MHfqwI4t&MEg#FBN?UMCU-O&jpD$ zK61)H5!z%Y`n8aqBZ+~&?<9Uz|H6REmj*edZL2S(YL%UJWcp$Kp3Hqg?4#>lh#g>} z_$(%IqXMsqPI3jnx4xbk(wWTZ6xAr^bhsJJ32z4&9Ey!JC>ne_cVb}_IS3DChX=v?c`X?=6|{> z|I<~YcNhDeZUht}e$8>b-S*&~DeEzVA&?^-X{)#AFzB|Jd3=YVqV^=q)(K3VF2JduxZY*HNYIuVlnA50VIM!dk0+u?L)9+Gla5X%Vi?kiGaz+fGEns9ck}f=Ri!`skY%G zH63=hATFch%l7TTTuS#^47~{44-~N<=@Y9>=`|O<9zGQhaj4@cb*x^z3St+f1(?RN z-JGV-e9t8d3`3s*PM%?RKJKhz709$1&w;hO(E)q%7js$9>;R>ekcd#z7yjsnCB zWOqj=xlUGEHOYcbb}PRFRjEZG<))WxQZm}MdJV=P4&pWQPG4NYmU z8?Q>)i8sWZ$YijO^?Q64e8HH{_hwD;qFoDmGr2RrcMsK#oX?P`Vtg}8dn1Y!dSI@yKp%t4y4pPm9Soif_Z4FUiuM!BhSWHMwe776aSi5z_UN2;8^Lj}JIF;a8zMmm9fV~-Q&uQyw`@Ge!|9XrYtF8VcCEkZ` znNNsWUTfbvW~yHuRP=tu8CXkvv(J2ty~Oo|1(BS^fE2L}w%c|x#>bE-&W3`t*vcbx z$RV|KUs>MFC$u?ayU(RqGT=GIml4*`R>6N_ty?m}gz6TO2(i`?;?7I2+2#?@QrK-J zi&4~>)XDO0DvlVc;T^2YTtV|jBb3(Ht`{`Z8fkqBFV9?G{#zyBQgufoNoPNe< zw5izc31F(RT9#;XJPOZF<8+kBaGjB#LC5#>a0PS>$DWcf z>!7te9ES`EcW^0kIpE4VSZ`I}PdDuDp?Q7QkxGOqwep~Tg*#i>waepn0R$@bcvCsv z?Na-PUm|3$wa=Z@W)`76YB$gq7mjGi(Rn(4fz`XWTqZSog0g2x1k6tR7+uW;H9CeM ztx3D&K%+mM>3|{(O!5dupD>6mMwUI(y43BI+Jmh`U9YdTcc z>vnb8-O==FXELhKXyHR~s)CWbZhQ6l9g|i-ZYzc$Zr2CJxB{UMpWCH?FpG`c1^_tT z05DV^S@}2j%ii8-tz33L);PQS4ngtdEsW)D`#d?=E2rMd0o?YckmL={hwH(ibKLx8 zsc<{SrxUZ;t)D}*@s6(XI!GDLm)AJw5Da~x+0|V$vE<(@0^x*k0w@z*!A0s@sJCu+ zs0d-6dowdegEH*z#(Y%Sy~QjHYuFn$SmIs#%YtYAAcVE8?}is(*d1bK-aiA}JjM0L z-hvz#%uDa^;j_x_n{{+3ff<5k;Mmq{zRO`A^tEaKjvS$G{$z}@{=i~&wl)0XWUjDy ztaE81Ov||xfo68D4Tvj1b0ChqHnbyUtMBd4>W15C0bTpE+H(=3Szap-Y0CU@TZ`B7v*|a_)th6}6F=iW z?N|wYx&e6zqCYN|)3260&0a?tsxp%AM-zU#B`u`3wLNUXOh8-PrlV~5h_B=NKrh`8 z#W;ax%59$47(h|k73JVJX~JEAc85NHy$Tb`{7O`E@`ZNibQ>*6u`UC z?3_tRA@ofJIAeTEttK~9sKrd)9bV3h+?HD-(d`8&BY5PGiaiaiMb*ABk|7_;ad0s* z{Th-esIY*Hn?*>%<8+{$*&?_O>Vvc^G?Ldrex{avE!!D(Z@bo^D6&o7LAn5(O9Et} zi(_f-G7|XZtpV$OOXy{Y30gJyERVs0Ga_BY^V0^|xRnrFUlwkB#))?03yk*Ot+Col z_hDM~^VXJPVb(1+D&4G>P%ziX&vOk-qXUi={D{$2)O~MO;^x1RTS`~idw*EBd<>=j zImffVvRFX=@g!Tz4%R>0$v=@2K*#kLk$B4|Tziif8DDDJ1h`c_V=VQtzAX(*)Q=p-oyTjSM$#c!dn!*h~zD2Y1fZ`DY5wDuR73L%@{7fj2W~mQy_d(UY%U6wQ|2T z@dYi|82$18{&Ec?wdQhD25}s{9L;Zoy4H%HHVgUPfU+9s%Y=&!+#(+7>8f`s&}16( z3R)(E(-BIvQ|ND1#H6G?%TTJ49o+G#Rg%{tuLZI`!A(o|11;Z7ex8sepXSkTF02xWXMN~9QRK?9V7*q+c3ng<9flL zV#uGL7HY_kC&Pm+k8T($oq~uh1S+CJMN_F9l1fd~22Y46#W(9iYg9t=JJ+_6W6^+8 zFX$cUKZr2JCLtJYTTO-@NIem8U-)#BFMK-jizeHje|e7o{;PNtpf4nKnfPGzcc$@2 z=7>*@IAA7|EmpX!f3nKIul(Oy!5?p#RN!@C%vw_HRg{q3R@_GrA7xh7El~dpkBW^#1a*B9 z8{bYFABM@$>v~Kk86U0Tc#L)#&1@{ef#anCG(w@F@z$oFn3&bNE>$e|dCMTbkc&t& zF(Gevz9r7#V3T<`SI0WzWQ`8sgQEh!5=0r0Nl4Tk64_BPD?|`*5f6E35x?;1>NL}? 
z^hH9&)7?M=ncgJ;`Odj-9s|dAz`dmnW z)3k;7Co=vTv-yv%<67ZlcHB2&_L6ZgOrXz86)K~dd=#f28q3!oU!P?Q<>NEEb{^PW z?V~4FiWC_>Ha$=j(~FKSz4oCLx`Q9QAZvCwp3pqmJ+?JG`UZ>F%$+Y1hr@2w-+N`T z-R?arxn7a2P%Yyrp2^$FJ%5ay%xKb})$X<2rdeW_Z`FF4WN3M+Fi|l8KKV^vv2i!U zy`@I)ce4fp6F6%Wu;Bs9_Zg7M2pQ9O9Tj*V)+`(ITD0_1*)8Lyi@l8~L zJjEhp&{9xHhb+ymT029k{Igv`yP4D8V$~RQn{TyYF=$Kbg|sy4cdQF-tjUHfdN;w; z{@gC^-`$RL65w{YJiu&jHpnE!i2K?0wG3JKjHc+%fJS#$3W&J*%s$yQ{07a(#tr z^s^!`7@$j)3Hk^3+*Ch`0x&Kp0_rlL4yWq^MjEx_Ax0^ZB`)E!^}bk*>6CY#6E&Sq zj1D^!R7I+W^^e{n_Nz~fI*b(e%P!2nciAuRM7RT4;qX4@*ij_$1fARCxj*s#$*4oe zi4G>!xIm?zP`OGRw>j87{IFJ@uUKDjQkHM*uQFHW8LHEWMJ*>j>$LTjQX!|J3%Y!J zHGJ{X+>7nsVqpK3Jny~sgabg;TzN%#F#4^|=H4~sTToco*qj383M5Q8w*Iuop80G& zz1?OnHOU8=s)BuwOxunI)(WRfI&(Ko|EldVb-gfC*x_VEY^J_-iFiqj_+K9CFAh!M zrSLf!iqg(3b(pkd#o!CP2Bzmlh?OHHal2U>qk>T(o=KZlNKk{T(s~&O zZNKM@_VeD=it`q!lUXm{w{{x?^7Up&`O4pFcyoF0VfyQ)bz;q_M3Ddcg6b30ff0~_ z=d--?hR0S`JdTch=DpxgJgK;WFn6W^WW14mI<4ln>{jz5Y0MmU8!@Tc7t+ye#b^>0 zJ?FclKGr6VTrNNB+!-9Az&mmMSoGR4u`P7*Ey|@)ZKYvj+AQmROVqgch(V71kh-wH zkppTLR7!c7kNqd7jxu1>j3GzZTZRyEi=n3m7ws02cQQ&1LnS#1T{ zsb7F@fvsGXz`a;dTxEJ$+~$=6_jOJ?Wlz;}fHfux^K3 zh9JJ${9%HmzVsuy=}9Ze^GFyN8_0H%YlPUdo6F#qxUDVo2A~3U+`UP02MHNam2Il# zxj%wLHysaj#(-|2*W=)9oUFfxo1>Z*&unJ)5WVl031D6v4>ObEwJRU!y0i1jnLZ~FY+%)-2zy`@zB;8 z`r`-+@0cc)QVAJ5XfdX|6s!j?K}X(&@z?dozXt0+R*s^dpEyB6SPnzvZ~D5B%+~?W zkgMVor0!&TN9klxMaLO$e+YkWZGjRUDvceLYncdA23OU;77s?*O(`nC;V5oYojS`(mJ@}}NLN(`^$%50wlyXYsqr<>cOeut$x(B9mu zf8FKK&9PADvasr?{2NkGYbu$w%+P!W0I8!V0h0D2t$B%mX2xUo7S5&|rgBp|Cm9SA z5O0s=s zHrCr;M-8)C{M}Oh`c7{sfRG@qrS3cZjdnwz!KptPnTs)N-QU;%{^!75{S((5gB$tVJ<-%`8#B5863_YPZPGuly}v%uO-<;LV1G*oikJtcQZXx) zn9aW>Z~s-ys8ztW25LWhrS&&P6F~pr0#DVu|K-H}w_o*nLjQdQv*7(6{b&1Ri|e)I z(tgiA9C?~KtKvUH{vQ2j^|V?0|CROsU1jas9|H;pbQFIs{HxaeYFdC#A5fR^8bRW( zjOJep3u`$Pu2B8XA@GS0>RtCiGMYa&`-jodQQ)-AFr#1-=C{AK?Eh?Ig7FErr9KZO z-wD1xnE!$sz&^a-vhOnWn|4Md0FzI2g8dBp?^^cwjgY|veh<`7sw@;LzAhCFl-#S+Qfb?PhUQ&dNPo5pghP~wf>J0ct2Uq~2`k`C) zA4*UBS=K>M{wlpO!=*pJ`R}iF0lU@(<174oNr_4V@<)rNyZx;XiVy=%(UU6{wpQa=reuTtHf3L(&2LAW zGs!a%*nwjGuvAQTptG#p0axO0jWApafSmJS{>HG{6}Zwr|H(S`H(hb}ww1$i=_|#+ zmC^U#?G~R@obXA-;XXM24@hdwfHoWt2Gag^<`N|oc$zy=L-zg$a5DF&5-iCtHU2XF z|C^WmK2HF4!q+qKzaI}*0BXOBouuY(-S{_E@`ie%z6mf?p#SZozpWpRqyWs;x)*_kGa%q-4$5<}B> ztMERNssBdke@?yzu0Mf*apumf_u}`$5=DI)l29aEzg_DRLM#DV`2QV+6 zQOcLkQSv$Klolmah2z(yNyv?Q0M0pNFWzWi(Y#-MHxCf76LC>X{>z@~tUN?Y3zVn^H>;%hxuRN2hy#YR+K$)bCd6W{hIU9FgGQAf z$jGoyRx0!iucT#<`o#YH>Ga@cM@_cKnXwm0gT>>s5v;M8p#mxwU)BV7-vCvl#j0^8 z-?D(Tf#6iY!G6(~^T^f@I4F5)=(URy{5D>k*X^HV_gC0$O4nN-XLj#DD92GL$sZJ} z%6Z5j^us%716AbXyrzl(&QT}eYE;gnR?p=#6>KkO#el74!9xK zR*t&IvcxE9XO{|-Kc|0*SzkIlGG=d?V{yrf@mrJEucxfgmDcqdzV<+6Ybu1law}b$ zj5rwSRY<6Rzq}tXn8wrCbko=0U{;G}cyqRIykrsUcDX|t&g!LDs?T)a=?N+Hv$S9J zUN9wt6zMtAK_NX5r!eZfKkcQP(jIc({AG8Kn3#C21N$-Bu-`MRP%UZ>MjDXz*eWug zoASfM?x7F|ESE8yw#Q9#9(gSof3G@{ zt%ewh)LR5kK7n~`zK2lN+s4}JQY<=6)em9LldB0On)Or)OCB=HMOU58!Apj4)%&xx zvA4-AaWrZbJrDP=S;nP{yqDpJp1fQ}e>vmvek(v|Ijbn2-#{YLz21`tg=Ms}q@yWM zeGL2IBbnGd+7w|ruBr^eXa64HfPc-GT}A!;0h7S9sc6DdUa~~9feZw07NpC+4R&;T z+Gz`wI2BIO(#pX^w*U~mXstf|&zN4%PkEE60U}=vcn9g&6B4Gg<=b4HaWb3C&bO7& zv;8^r4O%4U5=6Wa83OmO`G~H)L^tzY3JtUY;zYF~?tVQSH|KIEK&jICxYM5k2fWYG z$wH#VFO|<+e*&)I_|5b~QdEvbk3-1t{hf&t(vHty;-dyeBkoN}Eg-skZM?p6Z5tVX z9XM{yNz{=z^Je^s&1F4;nyziI#bP{^a?*CIPz{SS#Yo_MlqC_5Txgc44Aa6QD@`Rs z5cuKYb=Rm6JY%ssZ-(=$v9!-Y_33;bl}LY`jpk{gfKxka6sltg$FmWR>(g3~-i--` z97!VoFMeC6y*lrb0S^wD?#FELeV?1di8HgMK30UjH!XW&Z0^p)4}k1e-{f-57JTf0 z83UVRnIpcDe3X5-+od;8^}l-AQ?&S1I+iMSZ77=D6MudKO!)N`t-8<~hnca~CJi87 zjsD*9Cwe<5RvrZAI~nnPO3AnneuiFd8GCfvc?Vs4J%F}p?X2-;)9z|3a_+_h9=?}u 
zq0u2W{DGa>Mmhw%SPjMLw6jIr_Y~LQ4n($K#2w`k4oor2@R72bO^vuGvAPLOfqqbU z-IdS!V{!9qdpP8g_TedAcYXl#D*Ll?f1SgiR#w!;Wt0TmRK+~Di|%$l+?VsGbRG$@ zK>H`^}YVa{@$>>FE2M5YQ*H1Y_s(btJ*vrLZyNFS(bVHkbH|L5BN=!#2nvx`VoiYo=LuD?@Qa(RROSKmlGfT0&v#sJ}45W!5JDppE6RqpsEbcasv|PgH z_sRF&wJxm4UuJFUWavR-GyT{eEVo`ER=eAG65v!vre=iaK3C+T{o1O(Q|XcpYoE6} z{mp?FYt6om9KfSb6>7c$a`@wGm(tA6Cu;ylZFhlaJG3)~2g|8`tjlwlCPXwT1YD&I?@S+OZTTvhN%-qPCI(;za^81H(Uxg5pg zOL&0HYRgFd9P`Vx9S$3%!!LL&J=Z^b>9icVWd`il6VO~x1&g&BbJyqV?Od^(k67H+ zKFTyUPGC^2T@Bx4OT-)WM5t6z_`-CDr=zA6_R>cK48ZvNpXy@KWTA_A`1rH{XaU{N z>#X}{ug72Fo!j?5Rr)7tl}8F=xghmZjV+ecY(8m*S~?SWDaLi>#HNrKhp?F+kpkHY&( za%2XH07|JqF|KlD#kHcT7NHvQ5XCP^AP)}?8l61s2`{_L-sE+6Ouju+84yFGCdly8 z2?)kV4q!eW?49yU1$)Ir(lnS)4^i<6vRI>br&q%7ySGxYx^P3M^P4tv zs5n{;<;06Nw+3$06OHl{U+#iM?rtHsKv!J66^AeauAJ_7phYa#HSDEdNk$^x)T*cP(=HsH5!x%um*RO6 zn_==A{(OBY9Kk{&I^&?;$J1e)>z3h-dZ*2bMJcUt99A>YwE6nAs6G#yqY)yE5|aiA z+)IpkbbL;3QY74Y@}4EzVIIn}w})M!kA1M?Id0&GgZnD?pEjnm6Kxh-g&pfDeaw)% z3ytvr-BCIWpN~#ImSM6|#Hn5dtQ5|ut#sMDLL#y@7!-Z(c*%ROB_di`uKz(IcS!4g zq0Dk93 zCL?S2aQ6Glbjjq4jOLs?&|FS+3?Ryd;Tck5VtH#mFo#XlwBNqz$cPhve%eTe<)BYrt(*;E2zp>*d5WBQ_O2|%4yy_ zZ;WCVLB1a-RL$@)@;}@T0!WB$@h{e=b-<6gR3n?-_0qKs7Nd8J{r%8BUvk|bx?7!2 z%Da=cH_4iZkNheF9tvKg!yr9Bd`;@?%*0LK^0)%q<^<>Hwc6#Qe`D?}J#^jCJUhG^eb` zKr+seo5b-*rd!`1UNZ3%(HaD%YSI<{S@FB0>}yw}FQ^U&U^u)kY-z&kPYt5fk%Gm0 zD_q_xY_Oo&92e{@WON-}x0oI58ODun}DBBw=GKijM z$76GrNu#I_(CNNp$#K}-G|kJ7W!Viher(y}80&xL@Zjc{5(R4hVbCBiXW^b6V5-SL z)5ro+5D>IAicri~FzX6HNr^Azo6Flkx<_YgILj}-GLrT3{m>gl&n^+nhC!OVgEDOjM1hVRDT|G@t ztWA051A|ozKI>T&jlL&!sADSX7&m%IIUg%{Z%A3h9i(fQ^GFKBoQroBT>o^CvM0Zb zTzfjP9+QZZk`iq>9eNedo-7hhuxnEV@U5rMr$L&Rb>OHZ`>wgEj0_hZE0Oz&e1#%0 zK9=^m{;LVa=p7H8tz7cKJ+4)x9ZHat?F_j}iKfr+KzI%{Qs$Q35z2NnU>lke#n)q^kq`FiYPJaej-YD$%RPwnhr@kqDaC#C7G$%ZT9UG4>VnOIeSk>EfDIh_@dg~-=f%l zgtRfayJDo%94km|YY_>zI{A)HJ7M5atGZfyd6=&W;IB!)7>p%j4m^$lIZXvzg+edj zn9H7vfA?*L>G0d$Q)52Aa`1nMRBjOncYD^}{Z+tsy$Y>u4~B7bAs-|mz{&_hM888- z&yjK)M#CLss}#EW_Bfc5(LtEVN%L}ODeS|rmFr`LPum=%s7-+bAb-vi#>G*qD96$35P_s! z`wh8=fwYb^v!$VIWCFM!u-9^g40Gv*lLboECF74CoEl3cM#gIx`I?l;PjJ8;m#Y$f zp(>53BDL7=X#@Dd-5ZPk?5;pr&8vf!BSEkGh1x@`0<=Z1vlUpnVWLwW50@T^6X)O! z0x#S%U5@FnFvg@!Lg6Nq_1NpyG#QD`XC}MutMW$b}dxTQVDqk^6pF zqOV+fln5he);%|${S-vtYuC_mc*_3%6>&G@33 zG4qdPywgH&Quw0YLVRX=(cb?#Xc&8ZX}ULzk^3zd0mD(Y5H6)3ynDp*HR!zS{=#yu z+87QQ?&E5#iY9tHiP&it`%)R1bFJkZ8Q(+IgzGmP5AaBBIKSqJ!G}O^tO0-otG7WOMdM~xDU)F<(L6NCu*bSaz0bHL zj+aJ0U&6BQ%D%NiE#HR-evcL`oiZWpkoDTw!lzJw2l(UDF$u^KMcg5`fdn~BEa%=L z#alua)GlaWYky4y?^)(y-fvFz1x*+XVSY7uY&Jh*PKG{xeD2hMCPzrshMHvJ&*>#u zl*}N4`ewO&-HJp6xmE}}h3kNFUSkU~W$)3Y?2t=3NFCK!_Z^I1AQF`fjNr>Q>qE&j zZn=2SIqVQhCSsFs4fP=c0KMY;7($;fwCb&1bz+rhf=-p@*#MR{JCNZM?Li)9Dr*D7 zM{bI+a#*GNeeRy_K;hde=L&rN155jjuBFTm07{$?B0mka>3=GWj8es@*`}9==DGK| zMzqTcO298hy=Kq&TEvCrwX|qaSa>;__*=G2#36_@);i<8rIiK;Gl>_Z?#@hbPCddt z#t4EkD{inIsyF9u0)*dxAYG=tG+egvp4WK&rkR{!4+x+!xusLLaM#&4PJ6!`yg7Z? z^7t_oi<|l8(reVPg+CpZj*GliyhJnX@s*Fc=HmNz6SJ(lMI@LM%dbSF!l*2`om+{T z)sYqu>K+NqAN(GE__Nd+X{)r2B*2>+O+jk0nN5tK9bN!iPn~PC=lkjVe{rIS5~4oe z=TfN@!j_~J2zlqSr$?yeZ0WW#kHO)1)Wmw7E-!UbyRABDhB6#&b-$y2N_DjHsTQFN zqCbMM_~d)un(FZhx|fP}%RvF2z`LN&@k1-8wzPfMKiR({;Guu!2v~fNs}8`GI7+z1 z2qoJ_M3RS;CYLOx4ZAnc>GaUJKgnJmCUrDe5^h3dd|MwMvhxwGHXVFb4d~? 
zPGUV@4wX$fr+zxUmPNGc%hVbis~>bPnwCzQEmSi)DQrU`EZGh=}> z@B2X+Y%-g}@rUtPMm|*65du1t-k(6i`BJ^8=KtOt*Def&cSN!I7ku?YT`CbG##0#>>q%-vB+vhl}LWvwI(7N~m|Mljk>ik(>r^ZbTBO z{kWK6=#x46-XnIKxq6`|GIbuOh7O;$o+Zjk%-@z3s-!+ro=sh@bc8)_={3HblI?OD z|3TzH|2A%A-G`1*k0We_b}y-Rm83}vpw?x}w)xnWy2gFwdo6TUyOg+N$v*Yh<4eFp z{6FE=wa5s&6|W%=7ImfvB*l*tV4Fa`Tn7@~GxEMiBhG$bW>+wvN(w&Zd$d#g&0pszk_X z_gWPIu46pv;P=%Vw-K+d(T;LglL!9R9*>rOL?~M%Ed{PeXOLi{PS(bCmPZaJuFvqWO~X4d+wE8 z&Y`U&{d9f0e0~?R4Gzf%&uLHvQ8Cjuy%!lFyb{dOvA0pBS9frhut=JL?Z~1Tg1}@# z{qUfxd)y_Z_awJMnIHe{Nx!e*iqA0Z@e9Gj6O0z9iTmgs7oTP`it@QYCTFsLknR$( zWdkfE|C+@C&;P$f-61M4{X)9)TIB+N+(U8%aTx|Nyw*WhzPpfez4zlsZBfc^a&(2T zi%M^1I88=nfbF3&x;%byP>ml!MQu>CzZuhO_#Ypk_1jkzLlcXH@fYPw zEs~Ff@b{X~T$hDE+CG10hR?@{O`xI;S=Hd&rZmBiPlhv`&R2S>_!@*iF)iC3QhpDM z{2d3yHg`Dz6+x1MaGbZqus149rz!zaD9YY_ug0<)IbW?XrvyO(zWZaLQXst@4V(qw zTT_Yd3U)AI4e>hB0PQ+@Kk$!%UN>3qE%JMxLCSm*fV}Olf6+v|L4PG*>n$3^LFR`Y zm4{1@;|3dmjE@-z0cRZ7OQuV>(pWANE4$g-I=RpaCw8y`4H3D#n)s%-T{zg_dYBpz z`y~;P9pAD#Ha)_v>IKzGtFxYg!gbhBw3qw0GyTi{VqP6h|AgpKguDGnz6Us1Q)GTG zXyC&k96*LcF>2HLnfTMBhT_BFmNF};ge(GEpG00k#XYj!8{>DpLe4uZe@VD3U(%7Ip5tbS;>zUS-@TLY*c;0QP+GA+hl;e zYhxN_xfHnRUq3vSZcn^a}^&~kiJ`ec~9ZA)E$_6 zaPD2y@q&u(y(ovXmrDct>MRP;(Ft`p#eDdNOE{C@G!izaWY~40K3R#rHSS*zD4@g% zH}Dm~xPo@Y>gD7kbkEJgk4$>hAp2vx8QGuFztkT-vmcO1bhu^GX|t#>4j}o?w*$Vn zG+FA_Znm9a1EHT98)gs_DMyP>zJTLRqcI*(<*{UT@J{Kz`4(VMdLk~EhNT*M^YI3< zG?j--rdMXkqEfKeus3Pyq=d1%ok=4wIvKneLqbxTn0c;Olpol%7NYsF1$Myka(#w5 zel1<;HfzH}xJU}MD~{%j8|5SGiQJLBl>1)$pn!DiDWXmU3THhE3VH&43zKA5f#%zU zub(Lx+^?v8YL7s<0$U7m<4fPna_v%Lz(7LxH+MP->v$` zrP$U95wGP!H?*4 zp+ZT5E;+)Wl--bIz_2Xnt5Ndp*ulL$DVYq)YznVOml@7Z=V_lA>hSaLtZ+Up_YLyA z4+hHo!(4Vf$E!;W!m;MGsfe=Io=<4KBbt^YqOdkhb%)dAnLS(harEpc>SyJ!DL>zInG-d#`DV>5qwj?PX7Fi0m zk_yzzL|@6>15J`&Trnh?(G-i*AopWO+kKz7RnL(_&kZv5_H*%-T`o;56L_d<`M6rO zwF@$syVzGxq0}mfNlWQ^OVh4)E`RNWx=|>zg!PvDu82=dqFZ`+$FYvSS^F$h=hoS* z%I(t1VZKfn;?|RhXsDKu+aI6Vav#)AqKV^&efrsrut~}b&cQ#0zK2vEtgb{I@-nKO z#7pc2#+2p%0pJh67a4Y_l(A(guY34y&i1C;Ud&V%$d@i|`gHD{MxPpjb5amU~LT>Mx> zWsh^f{3Bp|$?0YJvasF5yi0wSAStF|ng*)Pp3&vrmq3z~OpiA)tGC?Nhp@XxA&g5e zZ!dr|@+aV4BnUB0235;F5q|o!qvJ>Scvs}t%-6mH!2F*YIZho3Ryn%E&h<;eqmn^| z>agE&l6%7J34dA{{b}bp|F@v+d3Zt>w={RPiKT-~o_7V4ly7^eqh4c1jbm%ypTUcW zKXcyUf8{fn4wjbAi!e?@Tx3hEe7AAJZ5b}J;BTiws!t}8uEB;Ejlp+t^kdK^R;DW+ z+Lj=v^sxC(=BPKBFL(kZui2+KPsHO+>&^!sd)kf1K4R!fibsSMxnP@T zZC$;K70DM%juZ-o2t*xy7K)&`JP<}k>t4eqV~CW+FTeQcaeFpZPrmG7!$lK*GR5Hd z!JpX&pKl|iFi?{S&dY&(KPAb}pBz!>lTmn$XjyF@qiIO*8nbcx`ZhAM&G)5Kp@&ci zDg$&fyU4lBUnqM@OqT#Q9iu>k69euP(a~XmZv_M6Ms|G>z-jOa2yJHoaiMkq5?cOSU4HH>s z?h~xuns(=+_tAPv`@b5RhY^$ving#Cz@Zx}omoq#Ehc|JeDtIk(KbWhZ@$*+-aj zI9id9U8$&l4K6`Ter6lq#Py((x(c;Yr8}t99G#jhQxN|X?^-KU|D=S!jxt9OgdIuf zlt=O^ak2Lg@ZL56-aE~nSrx&{7$u?8#z2QDQr~Zt5#_EV>Y^T)HsRu#gwG+0u$(}xP8864S>2=yPjM>X_HVpPzg}?-fv%xFQJFmMgUx5F#x*fU?)jeqEoBAnD9<=;e7 ze`@iNie8(uPM$;Zxu2}r_`Uc2!R&E^XnHxptal)wxeFBfyPn&}2L&{}1L`P@tbQsp z5FDR@g^44ZSd?4$j)HLCM^r;d%EL2p!BJ8oVNWp!dGs$Hv$8H>_-d}fKL=rf02-5+f!;XV^m+h)ycUZYQudUM>amb-~)+caS+t0*4p zFmMSFV_+{o7h*XmY{^86h>M3d_*cXE;?X6L_z;N>k)rrSEWTa_%IbV-+}*JzOac?EH!4@EYsAIg!#dq2V3Cfa zn#~P%KvmSbp%J58tuUMLdljN6{3zECpcG%^b}6=%r$EGIYyXDz3EPcIqo+laN|W$_ zt8s<*Jsi2}z$3?AGI~?UE~wrABlm@vs~v;ir0eE>ZI-&yrWe(6r_b)?-6@!cv9oHa z840xa?g2fS@jZ;fe#Kc#Wr1P;qwyi{@;8dvXUHvQi*AQtQMp$_!sVIIGxgp_5`Xa_ z)}{YMNa2GaSN_81%@-u$R}j#`{7J92@qq#?%`W5vMu)cXAJFJl_#UF>m>Rim_ZIaw z782`wU&!A0dTx85V!`mV*d`wgS=HCa)`C>>5Bcc#cZ&4<5wr=|NAt%$##%$Nv-##~ zJ|M*;By4)m>BxJ8y?LxO`6>|&b7_Bwo%KXE+MOj55Cl3L;iqTM|M?U|tn)rwMJY;Sb}HJZ z*+#x9Hc+B6w=~|N@SEGuJO@vB)9LICQ%VH|+s)9MTj@r$@i;EA)QbfE{tzXKPZndZS@3UdZNP(d?Jf-PFR~lcpu- 
zg<@VN-bm|~`8a87*}vncbF+JS+%L&~WPC}=%hNqKH+kB};?%{daPa=*`j**pSyV-l z#T(aBOjwW~`nmZdLy#y0QXjYJBAXMf{+Z>eGJEw|U&FfndIp{I?%=dgcR`*UI0=7q z4Up?aX7?hD0Q$}HmL=8J!S0B5Ukn}X`M|T8Hy&5bmoSe&D9U>J_36Iy&BoO`=0Gr6 zG576N$46T*_Od4Rr9F2L5EPN5%tJp%=d|cy95@)C(o)5RI~^qmsX!v)Zq_J1h6%@N zK81>uX7U2lGIaFNe-T1>rpmN3b=t-H!{8CRu zl3)LrFB!6C{Gg{RRPC*or24j_KHn1VU4SH08xDSMXF3p1O=M?W6kiJ~9b`lE=yFwS z1LX71oX>o>kubb!WL`UJ+lmutKtXD~t97hJ++me-+<&*`NWI$fz`mT)gkeP7yqSSq z%FQPbM>mhQnsQ#poLdqJ-yA3Op#Rp0vw5NQ+6va~HPVpbVA_oIZuOO;qN3K$MKP?w zXiBKIoO*JcVjHEiP6KcA^jl}0fm{M_d*~Nf!PVn!DI9h)l`}0~Iy)SN%oO=tOUyrn zeb^@v%3pTymCtpuiQeKn%^Ois9oAg4h>mCPl4NQjUw1zb++W67Hgy@N)s9Q=3Jqm8 z`6d9h?_<(-2fgTs({#0b?*u-^Z^CGXFBWpY3(9SZBS`l=iQG>UzntE?*=SA~jV|HF zF#)Q~Dx7^8SI`N;v+Bkkh1djH5G*8#P}>9~*k}lXulF4@L61%ij{>EcO7Z5@P+tDE zbZ-0A>}#7kWLsJIt(w$9`sVF{Fbd!rg>+LVCkw7bFVaK8A5TZ0Yn$GRM zx=p^?3?nth5udY{ESj7$$}^mo6@nEE+&WF=k{}$Y0YB=Ejg++8;+;e$^DyX$k8S0q z#ru%^X3uP2`^b9;r8ACIS4*$<#SeJqKjyZlSq_!)d2z{BdXeEM-H>?95Ouet%7Dhh z94?70+QBjq%^aOcdirDPjRt$C7FIHe2qvX6Rjq}3iwZJcpz8|rd~>vGeT_Dy>5$E1 zb4O-dfVEh+52ZSBxCge=_-7s>jnlVl+$RZf0-aLkse`zeiG(9<)&Yj~GRwPSGPfJ@ zR!r63(KWdVv->526Jr{&Itaudk&eZ_eYhWfVQ*5V%m0{Hn^B!y$iC>_Z@#i}ksYgg zQ&Wi?QY=P2`aZkGc(4XTy=~WlaK8k_=>=S0-F+21hOK_{4QCgl!<$*?DPI)F^FExN zgdnvIpG$J7k?!;=!G?2uON+v^WxEl&?+>i8&X#k)Q9#}Cy_3G-<-x*(mYAzf_E%*y z$A*?lk7y0r`|ag6-;-$5l@%YR2I=s7AjmHW`Cz{x?G*Ga7X1D*Vkk4kw>eE0L&aJi zb(g$pB1X#QOC^ijDS@r?Vz|(_TD4^GhU14mXxDq{zF&W#?RclEC_$;^Il+#fGCX*j zx^P8qrFFE)JnVNkw%2&r9Dm7oN73aZe#zl}wFxn#lt% zf4L_;GAelKUhLGg%E#C39 z=p1qT;BM|B38%D_HG{Mk0gFHx1~udo-k2rJ*O70VtDL6M=EWNuZs$8BhY97u^Ul;( z^#=;RA(jIibtQN64Su{R^NwQo`5%T8N{*|9W%9BJ76|rVvH=1!VOU3Iwi*vf^y+39 zIiGcv=w=_^bU$*MsD3-)E|yrO6t>89vj^9Va{lgy#$xIC;nUX;y6NRQvIQdmZ|=xh zqhqynz+BVx;6T@MdseEC^REd5xPrqRiNl*+|B-XI4|?jlmVFRN8sj8uBQhHQxSxa`e%sAAIp5S0C} zBAT!M36ohMeptB22QxQGZU0M-yZ=dr*`(R1_c(eIP;P`eTOu}U#)u|SHugBK`+rUUwO)2t7<~AtU(#N9KRMQrYt{J)gZ@=f5ux!EMGf0#Q?+% zGt#2iCVlCECW54I>mI@6KHr&O9OM`zwCs=TPvdd(sq{;kwhF4krI4aW!87nRB4W-t zY6l8aODu41Vh1BuxL=<*&zQZe)~LBT-|6GjPmZ{KKUjzx43xd-XIjP5luK*RP0tLl zTUrI`5W3KM$X7ihLe9CYtdSh~)Su}eEa)lY!wx`=$j1&kfl9ocUt)<`*dH*NaU@6i zD{GGOEo;WUGH?}OC_eoVVKo+!5Y?lkKxNFBuckIwnjhzGa`F(041`%741D zK7d0_;BYEoHKr2OCA(;B?Sz!)WQbJWqT=&`zAu?L{ zPir6GlRyRC)1h^n4I;?rrUFm@R!XduD11X(_3JnRLUBfp@$>oCrpGs(e0w7hsNj*=@lJm7d(>UwO$-=zuU1tG3f*74z%B|MS8T=(LPy-ieCNqZCa4cR%;9 zTmEsCB@OYMIsddl&g_>*{^Jq;*_opL&!LRuSA@Uy{Kx)>BaGmLh8xHl+WnCK^4E6! 
[... GIT binary patch data omitted (base85-encoded payload of a binary file) ...]
literal 0
HcmV?d00001

diff --git a/tools/simulator/README.md b/tools/simulator/README.md
new file mode 100644
index 000000000..e74fbe2c3
--- /dev/null
+++ b/tools/simulator/README.md
@@ -0,0 +1,9 @@
+# Simulator
+
+We have simulators in different subfolders for different scenarios. Each simulator subfolder contains its own README; they are linked below:
+
+* [alert forwarder simulator](alert-forward)
+* [managed cluster simulator](managed-cluster)
+* [metrics-collector simulator](metrics-collector)
+
+_Note:_ these simulators are for testing purposes only.
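+
+## Quick start
+
+For example, assuming you are logged in to an ACM hub cluster with a `MultiClusterObservability` instance available, the managed cluster simulator can be exercised end to end as sketched below (a minimal example only; see that simulator's README for the full prerequisites and flags, and note that the start/end indexes are arbitrary):
+
+```bash
+# create simulated managed clusters 1 through 3, list them, then clean them up
+cd tools/simulator/managed-cluster
+./setup-managedcluster.sh 1 3
+oc get managedcluster | grep simulated
+./clean-managedcluster.sh 1 3
+```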
diff --git a/tools/simulator/alert-forward/Dockerfile b/tools/simulator/alert-forward/Dockerfile new file mode 100644 index 000000000..2cd82ff85 --- /dev/null +++ b/tools/simulator/alert-forward/Dockerfile @@ -0,0 +1,24 @@ +# Copyright Contributors to the Open Cluster Management project + +FROM registry.ci.openshift.org/stolostron/builder:go1.17-linux AS builder + +WORKDIR /workspace +COPY go.sum go.mod ./ +COPY tools/simulator/alert-forward/main.go tools/simulator/alert-forward/main.go + +RUN CGO_ENABLED=0 go build -a -installsuffix cgo -o bin/alert-forwarder tools/simulator/alert-forward/main.go + +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest + +ENV MAIN_BINARY=/usr/local/bin/alert-forwarder \ + USER_UID=1001 \ + USER_NAME=alert-forwarder + +# install the binary +COPY --from=builder /workspace/bin/alert-forwarder ${MAIN_BINARY} +COPY tools/simulator/alert-forward/alerts.json /tmp/ + +USER ${USER_UID} + +ENTRYPOINT ["/usr/local/bin/alert-forwarder"] + diff --git a/tools/simulator/alert-forward/README.md b/tools/simulator/alert-forward/README.md index 2e8fd8ab2..d2ba24d94 100644 --- a/tools/simulator/alert-forward/README.md +++ b/tools/simulator/alert-forward/README.md @@ -1,61 +1,167 @@ # Alert Forward Simulator -The alert forward simulator can be used to simulate multiple Prometheus instances to forward alerts to the Alertmanager in the ACM hub cluster. +The alert forward simulator can be used to simulate multiple Prometheus instances to forward alerts concurrently to the Alertmanager in the ACM hub cluster. + +_Note:_ this simulator is for testing purpose only. ## Prereqs -You must meet the following requirements to setup metrics collector: +You must meet the following requirements to setup alert forwarder: 1. ACM 2.3+ available 2. `MultiClusterObservability` instance available in the hub cluster ## How to use -1. Export host of the Alertmanager in the ACM hub cluster. +### Run locally outside the cluster -``` +1. Export host of the Alertmanager of ACM hub cluster: + +```bash export ALERTMANAGER_HOST=$(oc -n open-cluster-management-observability get route alertmanager -o jsonpath="{.spec.host}") ``` -2. Export access token to the Alertmanager in the ACM hub cluster. +2. Export access token to the Alertmanager of ACM hub cluster: -``` +```bash export ALERRTMANAGER_ACCESS_TOKEN=$(oc -n open-cluster-management-observability get secret $(oc -n open-cluster-management-observability get sa observability-alertmanager-accessor -o yaml | grep observability-alertmanager-accessor-token | cut -d' ' -f3) -o jsonpath="{.data.token}" | base64 -d) ``` -3. (Optional)Export simulated max go routine number for sending alert, if not set, default value(20) will be used. +3. Run the simulator to send simulated alerts to the Alertmanager of ACM hub cluster: +```bash +$ go run main.go --am-host=${ALERTMANAGER_HOST} --am-access-token=${ALERRTMANAGER_ACCESS_TOKEN} --alerts-file=./alerts.json +2021/11/08 07:03:23 alert forwarder is initialized +2021/11/08 07:03:23 starting alert forward loop.... +2021/11/08 07:03:53 sending alerts with worker 0 +2021/11/08 07:03:53 sending alerts with worker 1 +... ``` -export MAX_ALERT_SEND_ROUTINE=5 + +> _Note:_ you can also optionally specify the simulated alerts by `--alerts-file` flag. + +4. 
Optionally specify the number of concurrent workers that forward the alerts by `--workers` flag, the default value is `1000`: + +```bash +$ go run main.go --am-host=${ALERTMANAGER_HOST} --am-access-token=${ALERRTMANAGER_ACCESS_TOKEN} --alerts-file=./alerts.json --workers 3 +2021/11/08 07:03:23 alert forwarder is initialized +2021/11/08 07:03:23 starting alert forward loop.... +2021/11/08 07:03:53 sending alerts with worker 0 +2021/11/08 07:03:53 sending alerts with worker 1 +2021/11/08 07:03:53 sending alerts with worker 2 +2021/11/08 07:03:54 connection was reused: false +2021/11/08 07:03:54 connection was reused: false +2021/11/08 07:03:54 send routine 0 done +2021/11/08 07:03:54 send routine 2 done +2021/11/08 07:03:54 send routine 1 done +``` + +5. Optionally specify the alert forward interval by `--interval` flag, default value is `30s`: + +```bash +$ go run main.go --am-host=${ALERTMANAGER_HOST} --am-access-token=${ALERRTMANAGER_ACCESS_TOKEN} --alerts-file=./alerts.json --workers 3 --interval 5s +2021/11/08 07:08:12 alert forwarder is initialized +2021/11/08 07:08:12 starting alert forward loop.... +2021/11/08 07:08:17 sending alerts with worker 0 +2021/11/08 07:08:17 sending alerts with worker 1 +2021/11/08 07:08:17 sending alerts with worker 2 +2021/11/08 07:08:17 connection was reused: false +2021/11/08 07:08:17 connection was reused: false +2021/11/08 07:08:17 connection was reused: false +2021/11/08 07:08:17 send routine 0 done +2021/11/08 07:08:17 send routine 1 done +2021/11/08 07:08:17 send routine 2 done +2021/11/08 07:08:22 sending alerts with worker 0 +2021/11/08 07:08:22 sending alerts with worker 1 +2021/11/08 07:08:22 sending alerts with worker 2 +2021/11/08 07:08:22 connection was reused: true +2021/11/08 07:08:22 connection was reused: true +2021/11/08 07:08:22 connection was reused: true +2021/11/08 07:08:22 send routine 0 done +2021/11/08 07:08:22 send routine 1 done +2021/11/08 07:08:22 send routine 2 done +^C2021/11/08 07:08:29 got unix terminating signal: interrupt +2021/11/08 07:08:29 received terminating signal, shuting down the program... ``` -4. (Optional) Export alert send interval, if not set, default value(5 seconds) will be used. +### Run as a Deployment inside the cluster +1. (Optional) Build and push the alert-forwarder image: + +```bash +docker build -f Dockerfile -t quay.io/ocm-observability/alert-forwarder:2.4.0 ../../.. +docker push quay.io/ocm-observability/alert-forwarder:2.4.0 ``` -export ALERT_SEND_INTERVAL=10s + +2. Run the following command to deploy the alert-forwarder: + +```bash +./setup-alert-forwarder.sh ``` -5. Run the simulator to send fake alerts to the Alertmanager in the ACM hub cluster. +3. Check if the alert-forwarder pod is running successfully in your cluster: + +```bash +$ oc -n alert-forwarder get pod +NAME READY STATUS RESTARTS AGE +alert-forwarder-fb75bbb8c-6zgq8 1/1 Running 0 3m11s +$ oc -n alert-forwarder logs -f alert-forwarder-fb75bbb8c-6zgq8 +2021/11/08 07:25:54 alert forwarder is initialized +2021/11/08 07:25:54 starting alert forward loop.... +2021/11/08 07:26:24 sending alerts with worker 0 +2021/11/08 07:26:24 sending alerts with worker 1 +... +``` +4. Optionally specify the number of concurrent workers that forward the alerts by `-w` flag, the default value is `1000`: + +```bash +$ ./setup-alert-forwarder.sh -w 3 +$ oc -n alert-forwarder logs -f deploy/alert-forwarder +2021/11/08 07:53:07 alert forwarder is initialized +2021/11/08 07:53:07 starting alert forward loop.... 
+2021/11/08 07:53:37 sending alerts with worker 0 +2021/11/08 07:53:37 sending alerts with worker 1 +2021/11/08 07:53:37 sending alerts with worker 2 +2021/11/08 07:53:37 connection was reused: false +2021/11/08 07:53:37 connection was reused: false +2021/11/08 07:53:37 connection was reused: false +2021/11/08 07:53:37 send routine 0 done +2021/11/08 07:53:37 send routine 2 done +2021/11/08 07:53:37 send routine 1 done +... ``` -# go run ./tools/simulator/alert-forward/main.go -2021/10/12 04:22:50 sending alerts with go routine 0 -2021/10/12 04:22:50 conn was reused: false -2021/10/12 04:22:50 send routine 0 done -2021/10/12 04:22:55 sending alerts with go routine 1 -2021/10/12 04:22:55 conn was reused: true -2021/10/12 04:22:55 send routine 1 done -2021/10/12 04:23:00 sending alerts with go routine 2 -2021/10/12 04:23:00 conn was reused: true -2021/10/12 04:23:00 send routine 2 done -2021/10/12 04:23:05 sending alerts with go routine 3 -2021/10/12 04:23:05 conn was reused: true -2021/10/12 04:23:05 send routine 3 done -2021/10/12 04:23:10 sending alerts with go routine 4 -2021/10/12 04:23:10 conn was reused: true -2021/10/12 04:23:10 send routine 4 done -2021/10/12 04:23:15 sending alerts with go routine 5 -2021/10/12 04:23:15 conn was reused: true -2021/10/12 04:23:15 send routine 5 done + +5. Optionally specify the alert forward interval by `-i` flag, default value is `30s`: + +```bash +$ ./setup-alert-forwarder.sh -w 3 -i 5s +$ oc -n alert-forwarder logs -f deploy/alert-forwarder +2021/11/08 07:57:23 alert forwarder is initialized +2021/11/08 07:57:23 starting alert forward loop.... +2021/11/08 07:57:28 sending alerts with worker 0 +2021/11/08 07:57:28 sending alerts with worker 1 +2021/11/08 07:57:28 sending alerts with worker 2 +2021/11/08 07:57:28 connection was reused: false +2021/11/08 07:57:28 connection was reused: false +2021/11/08 07:57:28 connection was reused: false +2021/11/08 07:57:28 send routine 2 done +2021/11/08 07:57:28 send routine 0 done +2021/11/08 07:57:28 send routine 1 done +2021/11/08 07:57:33 sending alerts with worker 0 +2021/11/08 07:57:33 sending alerts with worker 1 +2021/11/08 07:57:33 sending alerts with worker 2 +2021/11/08 07:57:33 connection was reused: true +2021/11/08 07:57:33 connection was reused: true +2021/11/08 07:57:33 connection was reused: true +2021/11/08 07:57:33 send routine 2 done +2021/11/08 07:57:33 send routine 1 done +2021/11/08 07:57:33 send routine 0 done +... ``` +6. Clean up the alert forwarder: + +```bash +$ ./clean-alert-forwarder.sh +``` diff --git a/tools/simulator/alert-forward/alerts.json b/tools/simulator/alert-forward/alerts.json new file mode 100644 index 000000000..fbcf0b043 --- /dev/null +++ b/tools/simulator/alert-forward/alerts.json @@ -0,0 +1,18 @@ +[ + { + "annotations": { + "description": "just for testing\n", + "summary": "An alert that is for testing." + }, + "receivers": [ + { + "name": "test" + } + ], + "labels": { + "alertname": "test", + "cluster": "testCluster", + "severity": "none" + } + } +] diff --git a/tools/simulator/alert-forward/clean-alert-forwarder.sh b/tools/simulator/alert-forward/clean-alert-forwarder.sh new file mode 100755 index 000000000..f5fa9ceea --- /dev/null +++ b/tools/simulator/alert-forward/clean-alert-forwarder.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +KUBECTL="kubectl" +if ! 
command -v kubectl &> /dev/null; then + if command -v oc &> /dev/null; then + KUBECTL="oc" + else + echo "kubectl or oc must be installed!" + exit 1 + fi +fi + +ALERT_FORWARDER_NS="alert-forwarder" +ALERT_FORWARDER_DEPLOY="alert-forwarder" +AM_ACCESS_TOKEN_SECRET="am-access-token" + +${KUBECTL} -n ${ALERT_FORWARDER_NS} delete deployment ${ALERT_FORWARDER_DEPLOY} +${KUBECTL} -n ${ALERT_FORWARDER_NS} delete secret ${AM_ACCESS_TOKEN_SECRET} +${KUBECTL} delete ns ${ALERT_FORWARDER_NS} diff --git a/tools/simulator/alert-forward/deployment.yaml b/tools/simulator/alert-forward/deployment.yaml new file mode 100644 index 000000000..b49ebd98d --- /dev/null +++ b/tools/simulator/alert-forward/deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: alert-forwarder +spec: + replicas: 1 + selector: + matchLabels: + app: alert-forwarder + template: + metadata: + labels: + app: alert-forwarder + spec: + containers: + - name: alert-forwarder + image: quay.io/ocm-observability/alert-forwarder:2.4.0 + imagePullPolicy: IfNotPresent + args: + - --am-host=__AM_HOST__ + - --am-access-token-file=/etc/alert-forwarder/token + - --workers=1000 + - --interval=30s + volumeMounts: + - name: am-access-token + mountPath: /etc/alert-forwarder + volumes: + - name: am-access-token + secret: + secretName: am-access-token + diff --git a/tools/simulator/alert-forward/main.go b/tools/simulator/alert-forward/main.go index 873dc33b8..7582b7ecf 100644 --- a/tools/simulator/alert-forward/main.go +++ b/tools/simulator/alert-forward/main.go @@ -3,15 +3,18 @@ package main import ( "bytes" "context" + "fmt" "io" "io/ioutil" "log" - "net/url" "net/http" "net/http/httptrace" + "net/url" "os" - "strconv" + "os/signal" + "strings" "sync" + "syscall" "time" "github.com/pkg/errors" @@ -19,104 +22,184 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" + "github.com/spf13/cobra" ) -var alerts = `[ - { - "annotations":{ - "description":"just for testing\n", - "summary":"An alert that is for testing." 
- }, - "receivers":[ - { - "name":"test" - } - ], - "labels":{ - "alertname":"test", - "cluster":"testCluster", - "severity":"none" - } - } -]` +type alertForwarderOptions struct { + amHost string + amScheme string + amAPIVersion string + amAccessToken string + amAccessTokenFile string + interval time.Duration + workers int + alerts string + alertsFile string +} -func main() { - amHost := os.Getenv("ALERTMANAGER_HOST") - if amHost == "" { - log.Println("ALERTMANAGER_HOST must be specified!") - os.Exit(1) +type alertForwarder struct { + amURL string + amConfig *config.AlertmanagerConfig + interval time.Duration + workers int + alerts string +} + +func newAlertFowarder(opts *alertForwarderOptions) (*alertForwarder, error) { + if len(opts.amHost) == 0 { + return nil, fmt.Errorf("am-host must be specified!") } - amUrl := (&url.URL{ - Scheme: "https", - Host: amHost, - Path: "/api/v2/alerts", - }).String() - - amAccessToken := os.Getenv("ALERRTMANAGER_ACCESS_TOKEN") - if amAccessToken == "" { - log.Println("ALERRTMANAGER_ACCESS_TOKEN must be specified!") - os.Exit(1) + + u := &url.URL{ + Scheme: opts.amScheme, + Host: opts.amHost, + Path: fmt.Sprintf("/api/%s/alerts", opts.amAPIVersion), } - maxAlertSendRoutine := os.Getenv("MAX_ALERT_SEND_ROUTINE") - maxAlertSendRoutineNumber := 20 - if maxAlertSendRoutine == "" { - log.Println("MAX_ALERT_SEND_ROUTINE is not specified, fallback to default value: 20") - } else { - i, err := strconv.Atoi(maxAlertSendRoutine) + + accessToken := "" + if len(opts.amAccessToken) > 0 { + accessToken = opts.amAccessToken + } else if len(opts.amAccessTokenFile) > 0 { + data, err := ioutil.ReadFile(opts.amAccessTokenFile) if err != nil { - log.Println("invalid MAX_ALERT_SEND_ROUTINE, must be number!") - os.Exit(1) + return nil, err } - maxAlertSendRoutineNumber = i + accessToken = strings.TrimSpace(string(data)) + } else { + return nil, fmt.Errorf("am-access-token or am-access-token-file must be specified!") } - alertSendInterval := os.Getenv("ALERT_SEND_INTERVAL") - asInterval, err := time.ParseDuration(alertSendInterval) - if err != nil { - log.Println("invalid ALERT_SEND_INTERVAL, fallback to default value: 5s") - asInterval = 5*time.Second + alerts := "" + if len(opts.alerts) > 0 { + alerts = opts.alerts + } else if len(opts.alertsFile) > 0 { + data, err := ioutil.ReadFile(opts.alertsFile) + if err != nil { + return nil, err + } + alerts = strings.TrimSpace(string(data)) + } else { + return nil, fmt.Errorf("alerts or alerts-file must be specified!") } - amCfg := createAlertmanagerConfig(amHost, amAccessToken) + return &alertForwarder{ + amURL: u.String(), + amConfig: createAlertmanagerConfig(opts.amHost, opts.amScheme, opts.amAPIVersion, accessToken), + interval: opts.interval, + workers: opts.workers, + alerts: alerts, + }, nil +} + +func (af *alertForwarder) Run() error { + sigs := make(chan os.Signal, 1) + done := make(chan bool, 1) + + // register the given channel to receive notifications of the specified unix signals + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + // start loop in new go routinr for system terminating signal + go func() { + sig := <-sigs + log.Printf("got unix terminating signal: %v\n", sig) + done <- true + }() // client trace to log whether the request's underlying tcp connection was re-used clientTrace := &httptrace.ClientTrace{ - GotConn: func(info httptrace.GotConnInfo) { log.Printf("conn was reused: %t\n", info.Reused) }, + GotConn: func(info httptrace.GotConnInfo) { log.Printf("connection was reused: %t\n", info.Reused) }, } 
traceCtx := httptrace.WithClientTrace(context.Background(), clientTrace) // create the http client to send alerts to alertmanager - client, err := config_util.NewClientFromConfig(amCfg.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled()) + client, err := config_util.NewClientFromConfig(af.amConfig.HTTPClientConfig, "alertmanager", config_util.WithHTTP2Disabled()) if err != nil { log.Printf("failed to create the http client: %v\n", err) - return + return err + } + + // start alert forward worker each interval until done signal received + ticker := time.NewTicker(af.interval) + log.Println("starting alert forward loop....") + for { + select { + case <-done: + log.Printf("received terminating signal, shuting down the program...") + return nil + case <-ticker.C: + var wg sync.WaitGroup + for i := 0; i < af.workers; i++ { + log.Printf("sending alerts with worker %d\n", i) + wg.Add(1) + go func(index int, client *http.Client, traceCtx context.Context, url string, payload []byte) { + if err := sendOne(client, traceCtx, url, payload); err != nil { + log.Printf("failed to send alerts: %v\n", err) + log.Printf("failed to send alerts to %s: %v\n", url, err) + } + wg.Done() + log.Printf("send routine %d done\n", index) + }(i, client, traceCtx, af.amURL, []byte(af.alerts)) + } + wg.Wait() + } } +} - // alerts send loop - var wg sync.WaitGroup - for i := 0; i < maxAlertSendRoutineNumber; i++ { - log.Printf("sending alerts with go routine %d\n", i) - wg.Add(1) - go func(index int, client *http.Client, traceCtx context.Context, url string, payload []byte) { - if err := sendOne(client, traceCtx, url, payload); err != nil { - log.Printf("failed to send alerts: %v\n", err) +func main() { + opts := &alertForwarderOptions{ + amScheme: "https", + amAPIVersion: "v2", + interval: 30 * time.Second, + workers: 1000, + alertsFile: "/tmp/alerts.json", + } + cmd := &cobra.Command{ + Short: "Application for forwarding alerts to target Alertmanager.", + SilenceErrors: true, + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + af, err := newAlertFowarder(opts) + if err != nil { + log.Printf("failed to create alert forwarder: %v", err) + return err } - wg.Done() - log.Printf("send routine %d done\n", index) - }(i, client, traceCtx, amUrl, []byte(alerts)) + log.Println("alert forwarder is initialized") + return af.Run() + }, + } - //sleep 30 for the HAProxy close the client connection - time.Sleep(asInterval) + cmd.Flags().StringVar(&opts.amHost, "am-host", opts.amHost, "Host for the target alertmanager.") + cmd.Flags().StringVar(&opts.amScheme, "am-scheme", opts.amScheme, "Scheme for the target alertmanager.") + cmd.Flags().StringVar( + &opts.amAPIVersion, "am-apiversion", + opts.amAPIVersion, "API Version for the target alertmanager.") + cmd.Flags().StringVar( + &opts.amAccessToken, "am-access-token", + opts.amAccessToken, "The bearer token used to authenticate to the target alertmanager.") + cmd.Flags().StringVar( + &opts.amAccessTokenFile, "am-access-token-file", + opts.amAccessTokenFile, "File containing the bearer token used to authenticate to the target alertmanager.") + cmd.Flags().DurationVar( + &opts.interval, "interval", + opts.interval, "The interval between sending alert forward requests.") + cmd.Flags().IntVar( + &opts.workers, "workers", + opts.workers, "The number of concurrent goroutines that forward the alerts.") + cmd.Flags().StringVar(&opts.alerts, "alerts", opts.alerts, "The sample of alerts.") + cmd.Flags().StringVar(&opts.alertsFile, "alerts-file", 
opts.alertsFile, "File containing the sample of alerts.") + + if err := cmd.Execute(); err != nil { + log.Printf("failed to run command: %v", err) + os.Exit(1) } - wg.Wait() } // createAlertmanagerConfig creates and returns the configuration for the target Alertmanager -func createAlertmanagerConfig(amHost, amAccessToken string) *config.AlertmanagerConfig { +func createAlertmanagerConfig(amHost, amScheme, amAPIVersion, amAccessToken string) *config.AlertmanagerConfig { return &config.AlertmanagerConfig{ - APIVersion: config.AlertmanagerAPIVersionV2, + APIVersion: config.AlertmanagerAPIVersion(amAPIVersion), PathPrefix: "/", - Scheme: "https", + Scheme: amScheme, Timeout: model.Duration(10 * time.Second), HTTPClientConfig: config_util.HTTPClientConfig{ Authorization: &config_util.Authorization{ @@ -163,6 +246,5 @@ func sendOne(c *http.Client, traceCtx context.Context, url string, b []byte) err if resp.StatusCode/100 != 2 { return errors.Errorf("bad response status %s", resp.Status) } - return nil } diff --git a/tools/simulator/alert-forward/setup-alert-forwarder.sh b/tools/simulator/alert-forward/setup-alert-forwarder.sh new file mode 100755 index 000000000..235d9fc1b --- /dev/null +++ b/tools/simulator/alert-forward/setup-alert-forwarder.sh @@ -0,0 +1,70 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +WORK_DIR="$(cd "$(dirname "$0")" ; pwd -P)" + +KUBECTL="kubectl" +if ! command -v kubectl &> /dev/null; then + if command -v oc &> /dev/null; then + KUBECTL="oc" + else + echo "kubectl or oc must be installed!" + exit 1 + fi +fi + +SED_COMMAND='sed -i' +if [[ "$(uname)" == "Darwin" ]]; then + SED_COMMAND='sed -i -e' +fi + +function usage() { + echo "${0} [-i INTERVAL] [-w WORKERS]" + echo '' + # shellcheck disable=SC2016 + echo ' -i: Specifies the alert forward INTERVAL, optional, the default value is "30s".' + # shellcheck disable=SC2016 + echo ' -w: Specifies the number of concurrent workers that forward the alerts, optional, the default value is "1000".' + echo '' +} + +INTERVAL="30s" # default alert forward interval +WORKERS=1000 # default alert forward workers + +# Allow command-line args to override the defaults. +while getopts ":i:w:h" opt; do + case ${opt} in + i) + INTERVAL=${OPTARG} + ;; + w) + WORKERS=${OPTARG} + ;; + h) + usage + exit 0 + ;; + \?) 
+ echo "Invalid option: -$OPTARG" >&2 + usage + exit 1 + ;; + esac +done + +OBSERVABILITY_NS="open-cluster-management-observability" +AM_ACCESS_SA="observability-alertmanager-accessor" +AM_ROUTE="alertmanager" +AM_ACCESS_TOKEN=$(${KUBECTL} -n ${OBSERVABILITY_NS} get secret $(${KUBECTL} -n ${OBSERVABILITY_NS} get sa ${AM_ACCESS_SA} -o yaml | grep ${AM_ACCESS_SA}-token | cut -d' ' -f3) -o jsonpath="{.data.token}" | base64 -d) +AM_ACCESS_TOKEN_SECRET="am-access-token" +AM_HOST=$(${KUBECTL} -n ${OBSERVABILITY_NS} get route ${AM_ROUTE} -o jsonpath="{.spec.host}") +ALERT_FORWARDER_NS="alert-forwarder" + +${SED_COMMAND} "s~__AM_HOST__~${AM_HOST}~g" ${WORK_DIR}/deployment.yaml +${SED_COMMAND} "s~--interval=30s~--interval=${INTERVAL}~g" ${WORK_DIR}/deployment.yaml +${SED_COMMAND} "s~--workers=1000~--workers=${WORKERS}~g" ${WORK_DIR}/deployment.yaml +${KUBECTL} create ns ${ALERT_FORWARDER_NS} +${KUBECTL} -n ${ALERT_FORWARDER_NS} create secret generic ${AM_ACCESS_TOKEN_SECRET} --from-literal=token=${AM_ACCESS_TOKEN} +${KUBECTL} -n ${ALERT_FORWARDER_NS} apply -f ${WORK_DIR}/deployment.yaml + diff --git a/tools/simulator/managed-cluster/README.md b/tools/simulator/managed-cluster/README.md index ead780975..a104135f4 100644 --- a/tools/simulator/managed-cluster/README.md +++ b/tools/simulator/managed-cluster/README.md @@ -1,55 +1,70 @@ # Managed Cluster Simulator -The managed cluster simulator can be used to set up multiple managed clusters and create the corresponding namespaces in the ACM hub cluster, to simulate reconciling thousands of managed clusters for the multicluster-observability-operator. +The managed cluster simulator can be used to set up multiple managed clusters and create the corresponding namespaces in ACM hub cluster, to simulate reconciling thousands of managed clusters for the multicluster-observability-operator. + +_Note:_ this simulator is for testing purpose only. ## Prereqs -You must meet the following requirements to setup metrics collector: +You must meet the following requirements to setup managed cluster simulator: -1. ACM 2.1+ available +1. ACM 2.3+ available 2. `MultiClusterObservability` instance available in the hub cluster -## Quick Start +## How to use -### Scale down the controllers +### Set up managed cluster simulator -Before creating simulated managed clusters, we should scale down cluster-manager and controllers for managedcluster and manifestwork, to avoid resource conflict with the multicluster-observability-operator. Execute the following command: +1. You can run `setup-managedcluster.sh` followed with two numbers(start index and end index) to set up multiple simulated managed clusters. For example, set up 1-5 simulated managedcluster with the following command: ```bash -kubectl -n open-cluster-management scale deploy cluster-manager --replicas 0 -kubectl -n open-cluster-management-hub scale deploy cluster-manager-registration-controller --replicas 0 -kubectl -n open-cluster-management-agent scale deploy klusterlet --replicas 0 -kubectl -n open-cluster-management-agent scale deploy klusterlet-registration-agent --replicas 0 -kubectl -n open-cluster-management-agent scale deploy klusterlet-work-agent --replicas 0 +# ./setup-managedcluster.sh 1 5 +Creating Simulated managedCluster simulated-1-managedcluster... +managedcluster.cluster.open-cluster-management.io/simulated-1-managedcluster created +Creating Simulated managedCluster simulated-2-managedcluster... 
+managedcluster.cluster.open-cluster-management.io/simulated-2-managedcluster created +Creating Simulated managedCluster simulated-3-managedcluster... +managedcluster.cluster.open-cluster-management.io/simulated-3-managedcluster created +Creating Simulated managedCluster simulated-4-managedcluster... +managedcluster.cluster.open-cluster-management.io/simulated-4-managedcluster created +Creating Simulated managedCluster simulated-5-managedcluster... +managedcluster.cluster.open-cluster-management.io/simulated-5-managedcluster created ``` -> Note: to make sure the controllers are not scaled up again by the operator and OLM, we also need to edit the CSV in the `open-cluster-management` to update the replicas of `cluster-manager` to be `0`. - -### Set up managed cluster simulator - -You can run `setup-managedcluster.sh` following with two numbers(start index and end index) to set up multiple simulated managedcluster. - -For example, set up 1-10 simulated managedcluster with the following command: +2. Check if all the managed cluster are set up successfully in ACM hub cluster: ```bash -# ./setup-managedcluster.sh 1 10 +$ oc get managedcluster | grep simulated +simulated-1-managedcluster true 46s +simulated-2-managedcluster true 46s +simulated-3-managedcluster true 45s +simulated-4-managedcluster true 44s +simulated-5-managedcluster true 44s ``` -Check if all the metrics collector running successfully in your cluster: +3. Check if the `Manifestwork` are created for the simulated managed clusters: ```bash -# kubectl get managedcluster -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 True True 2d2h -simulated-1-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-2-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-3-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-4-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-5-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-6-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-7-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-8-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-9-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m -simulated-10-managedcluster true https://api.obs-china-aws-4616-smzbp.dev05.red-chesterfield.com:6443 Unknown 1m +$ for i in $(seq 1 5); do oc -n simulated-$i-managedcluster get manifestwork --no-headers; done +simulated-1-managedcluster-observability 72s +simulated-2-managedcluster-observability 70s +simulated-3-managedcluster-observability 69s +simulated-4-managedcluster-observability 67s +simulated-5-managedcluster-observability 65s ``` +4. Clean up the simulated managed clusters by running the `clean-managedcluster.sh` script followed with two numbers(start index and end index), For example, clean up 1-5 simulated managedcluster with the following command: + +``` +$ ./clean-managedcluster.sh 1 5 +Deleting Simulated managedCluster simulated-1-managedcluster... 
+managedcluster.cluster.open-cluster-management.io "simulated-1-managedcluster" deleted +Deleting Simulated managedCluster simulated-2-managedcluster... +managedcluster.cluster.open-cluster-management.io "simulated-2-managedcluster" deleted +Deleting Simulated managedCluster simulated-3-managedcluster... +managedcluster.cluster.open-cluster-management.io "simulated-3-managedcluster" deleted +Deleting Simulated managedCluster simulated-4-managedcluster... +managedcluster.cluster.open-cluster-management.io "simulated-4-managedcluster" deleted +Deleting Simulated managedCluster simulated-5-managedcluster... +managedcluster.cluster.open-cluster-management.io "simulated-5-managedcluster" deleted +``` diff --git a/tools/simulator/managed-cluster/clean-managedcluster.sh b/tools/simulator/managed-cluster/clean-managedcluster.sh new file mode 100755 index 000000000..cfdd401a6 --- /dev/null +++ b/tools/simulator/managed-cluster/clean-managedcluster.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright (c) 2021 Red Hat, Inc. +# Copyright Contributors to the Open Cluster Management project + +set -exo pipefail + +KUBECTL="kubectl" +if ! command -v kubectl &> /dev/null; then + if command -v oc &> /dev/null; then + KUBECTL="oc" + else + echo "kubectl or oc must be installed!" + exit 1 + fi +fi + +# deleting the simulated managedcluster +for i in $(seq $1 $2) +do + echo "Deleting Simulated managedCluster simulated-${i}-managedcluster..." + ${KUBECTL} delete managedcluster simulated-${i}-managedcluster +done diff --git a/tools/simulator/managed-cluster/setup-managedcluster.sh b/tools/simulator/managed-cluster/setup-managedcluster.sh index e644778f9..087dc5f63 100755 --- a/tools/simulator/managed-cluster/setup-managedcluster.sh +++ b/tools/simulator/managed-cluster/setup-managedcluster.sh @@ -2,37 +2,38 @@ # Copyright (c) 2021 Red Hat, Inc. # Copyright Contributors to the Open Cluster Management project -# default kube client is kubectl, use oc if kubectl is nit installed -KUBECLIENT="kubectl" +set -exo pipefail +WORK_DIR="$(cd "$(dirname "$0")" ; pwd -P)" +# Create bin directory and add it to PATH +mkdir -p ${WORK_DIR}/bin +export PATH=${PATH}:${WORK_DIR}/bin + +KUBECTL="kubectl" if ! command -v kubectl &> /dev/null; then if command -v oc &> /dev/null; then - KUBECLIENT="oc" + KUBECTL="oc" else + echo "This script will install kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your machine" if [[ "$(uname)" == "Linux" ]]; then - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl elif [[ "$(uname)" == "Darwin" ]]; then - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl" + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl fi - chmod +x ${PWD}/kubectl - KUBECLIENT=${PWD}/kubectl + chmod +x ./kubectl && mv ./kubectl ${WORK_DIR}/bin/kubectl fi fi -SED_COMMAND='sed -e' -if [[ "$(uname)" == "Darwin" ]]; then - SED_COMMAND='sed -e' -fi - -# temporal working directory -WORKDIR=$(mktemp -d) -${KUBECLIENT} get managedcluster local-cluster -o yaml > ${WORKDIR}/simulated-managedcluster.yaml - # creating the simulated managedcluster -for index in $(seq $1 $2) +for i in $(seq $1 $2) do - echo "Creating Simulated managedCluster simulated-${index}-managedcluster..." 
- ${KUBECLIENT} create ns simulated-${index}-managedcluster --dry-run -o yaml | ${KUBECLIENT} apply -f - - ${SED_COMMAND} "s~local-cluster~simulated-${index}-managedcluster~" ${WORKDIR}/simulated-managedcluster.yaml | ${KUBECLIENT} apply -f - + echo "Creating Simulated managedCluster simulated-${i}-managedcluster..." + cat < /dev/null & \ - sleep 50 ; \ - query="curl --fail --silent -G http://localhost:9090/federate"; \ - for rule in $$(cat $(METRICS_JSON) | jq -r '.[]'); do \ - query="$$query $$(printf -- "--data-urlencode match[]=%s" $$rule)"; \ - done; \ - echo '# This file was generated using `make $@`.' > $@.txt ; \ - $$query >> $@.txt ; \ - jobs -p | xargs -r kill - -$(METRICS_JSON): $(GOJSONTOYAML_BIN) - matches=`curl -L https://raw.githubusercontent.com/stolostron/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml | \ - $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.data."metrics_list.yaml"' | $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"'`; \ - names=`curl -L https://raw.githubusercontent.com/stolostron/multicluster-observability-operator/main/manifests/base/config/metrics_allowlist.yaml | \ - $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.data."metrics_list.yaml"' | $(GOJSONTOYAML_BIN) --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"'`; \ - echo $$matches $$names | jq -s . > $@ - -$(GOJSONTOYAML_BIN): $(BIN_DIR) - GOBIN=$(BIN_DIR) go get github.com/brancz/gojsontoyaml - -$(BIN_DIR): - mkdir -p $@ - -build: - docker build -t $(METRICS_IMAGE) . - -push: - docker push $(METRICS_IMAGE) - -clean: - rm -r _output && rm timeseries.txt diff --git a/tools/simulator/metrics-collector/README.md b/tools/simulator/metrics-collector/README.md index 5132bb540..969533ee5 100644 --- a/tools/simulator/metrics-collector/README.md +++ b/tools/simulator/metrics-collector/README.md @@ -1,53 +1,127 @@ # Metrics Collector Simulator -Metrics collector simulator can be used to setup multiple metrics collector in different namespaces in one managed cluster, to simulate thousands of managed clusters push metrics to ACM hub cluster for scale testing. +Metrics collector simulator can be used to setup multiple metrics collectors inside or outside of the cluster, to simulate thousands of managed clusters pushing metrics concurrently to ACM hub cluster for scalability testing. _Note:_ this simulator is for testing purpose only. ## Prereqs + You must meet the following requirements to setup metrics collector: - ACM 2.1+ available -- `MultiClusterObservability` instance available and have following pods in `open-cluster-management-addon-observability` namespace: +- `MultiClusterObservability` instance available and `metrics-collector` pod is running in `open-cluster-management-addon-observability` namespace: ``` - $ oc get po -n open-cluster-management-addon-observability - NAME READY STATUS RESTARTS AGE - endpoint-observability-operator-7f8f949bc8-trwzh 2/2 Running 0 118m - metrics-collector-deployment-74cbf5896f-jhg6v 1/1 Running 0 111m + $ oc get pod -n open-cluster-management-addon-observability -l component=metrics-collector + NAME READY STATUS RESTARTS AGE + metrics-collector-deployment-695c5fbd8-l2m89 1/1 Running 0 5m ``` -## Quick Start -### Setup metrics collector -You can run `setup-metrics-collector.sh` following with a number to setup multiple metrics collector. +## How to use + +### Run locally outside the cluster + +1. 
Get the host of the metrics remote write address in ACM hub cluster: + +```bash +export TO_UPLOAD_HOST=$(oc -n open-cluster-management-observability get route observatorium-api -o jsonpath="{.spec.host}") +``` + +2. Retrieve the CA certificate used to verify the metrics remote write address: + +```bash +oc -n open-cluster-management-addon-observability get secret observability-managed-cluster-certs -o jsonpath="{.data.ca\.crt}" | base64 -d > ca.crt +``` + +3. Get the certificate and private key used to secure the request to the metrics remote write address: + +```bash +oc -n open-cluster-management-addon-observability get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -o jsonpath="{.data.tls\.crt}" | base64 -d > tls.crt +oc -n open-cluster-management-addon-observability get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -o jsonpath="{.data.tls\.key}" | base64 -d > tls.key +``` + +4. Set the name and ID of simulated managed cluster, for example: + +``` +export SIMULATED_MANAGED_CLUSTER_NAME=simulated-sno-1 +export SIMULATED_MANAGED_CLUSTER_ID=2b4bfc20-110e-4c4e-aa42-d97ac608c5e8 +``` + +5. Retrieve the simulated metrics by executing the script `generate-metrics-data.sh`. For example, log into an SNO cluster and execute the following command to get the metrics for SNO cluster: + + +``` +IS_TIMESERIES_ONLY=true ./generate-metrics-data.sh +``` + +> _Note:_ we should find a file named `timeseries.txt` in current directory after running the command above that contains the metrics, you can generate simulated metrics in any connected OCP cluster. + + +6. Run the metrics-collector to remotely write simulated SNO metrics to the ACM hub by running the following command: -For example, setup 2 metrics collectors with 100 workers by the following command: ``` -# ./setup-metrics-collector.sh 2 100 +$ export STANDALONE=true && go run ../../../collectors/metrics/cmd/metrics-collector/main.go \ + --to-upload https://${TO_UPLOAD_HOST}/api/metrics/v1/default/api/v1/receive \ + --to-upload-ca ./ca.crt \ + --to-upload-cert ./tls.crt \ + --to-upload-key ./tls.key \ + --simulated-timeseries-file=./timeseries.txt \ + --label="cluster=${SIMULATED_MANAGED_CLUSTER_NAME}" \ + --label="clusterID=${SIMULATED_MANAGED_CLUSTER_ID}" +level=info caller=logger.go:45 ts=2021-11-19T07:58:39.011221342Z msg="metrics collector initialized" +... +level=debug caller=logger.go:40 ts=2021-11-19T07:58:39.117297417Z component=forwarder component=metricsclient timeseriesnumber=3667 +level=debug caller=logger.go:40 ts=2021-11-19T07:58:39.122690473Z component=forwarder component=metricsclient timeseriesnumber=3667 +level=info caller=logger.go:45 ts=2021-11-19T07:58:39.250981391Z component=forwarder component=metricsclient msg="Metrics pushed successfully" +level=info caller=logger.go:45 ts=2021-11-19T07:58:39.267185279Z component=forwarder component=metricsclient msg="Metrics pushed successfully" ``` -Check if all the metrics collector running successfully in your cluster: + +7. Optionally specify the number of concurrent workers that push the metrics by `--worker-number` flag, the default value is `1`. + +8. Optionally specify the interval of pushing the metrics by `--interval` flag, the default value is `300s`. + +### Run as a Deployment inside the cluster + +1. 
Run `setup-metrics-collector.sh` script to setup multiple metrics collector, `-n` specifies the simulated metrics collector number, optional `-t` specifies the metrics data source type, can be "SNO"(default value) or "NON_SNO", and optional `-w` specifies the worker number for each simulated metrics collector, you can also specifies the simulated metrics collector name prefix by the `-m` flag. For example, setup 2 metrics collectors with 100 workers that collect the SNO metrics data by the following command: + +```bash +./setup-metrics-collector.sh -n 2 -t SNO -w 100 ``` -# oc get pods --all-namespaces | grep simulate-managed-cluster + +2. Check if all the metrics collector running successfully in your cluster: + +```bash +$ oc get pods --all-namespaces | grep simulated-managed-cluster simulate-managed-cluster1 metrics-collector-deployment-7d69d9f897-xn8vz 1/1 Running 0 22h simulate-managed-cluster2 metrics-collector-deployment-67844bfc59-lwchn 1/1 Running 0 22h ``` -It simulates 200 metrics collectors to push the data into hub thanos. -> Note: if you want the simulated metrics-collector be scheduled to master node, so that more simulated metrics-collectors can be deployed, you can set the environment variable `ALLOW_SCHEDULED_TO_MASTER` to be `true` before executing the setup script. +> _Note:_ the above command will simulate 200 metrics collectors pushing the data concurrently into hub thanos. ### Clean metrics collector -Use `clean-metrics-collector.sh` to remove all metrics collector you created. -``` -# ./clean-metrics-collector.sh 10 -``` -## Generate your own metrics data source -By default, `setup-metrics-collector.sh` is using metrics data defined in env `METRICS_IMAGE` as data source. You can build and push your own metrics data image with below command: +Use `clean-metrics-collector.sh` to remove all the simulated metrics collector, `-n` specifies the simulated metrics collector number: + +```bash +./clean-metrics-collector.sh -n 2 ``` -# METRICS_IMAGE= make all + +## Customize the metrics data source + +### Generate your own data source + +By default, `setup-metrics-collector.sh` is using metrics data defined in env `METRICS_IMAGE` as data source. If you want to build and publish your own metrics data image, you must log into an OCP cluster and then execute the following command: + +```bash +METRICS_IMAGE= ./generate-metrics-data.sh ``` + +> _Note:_ if you want to generate metrics data against OCP 3.11 cluster, add `IS_GENERATING_OCP311_METRICS=true` before the command above. + ## Setup metrics collector with your own metrics data source + Running below command to setup metrics collectors with your own data source: -``` -# METRICS_IMAGE= ./setup-metrics-collector.sh 10 + +```bash +METRICS_IMAGE= ./setup-metrics-collector.sh -n 10 ``` diff --git a/tools/simulator/metrics-collector/clean-metrics-collector.sh b/tools/simulator/metrics-collector/clean-metrics-collector.sh index 79833ed37..a6c3ade75 100755 --- a/tools/simulator/metrics-collector/clean-metrics-collector.sh +++ b/tools/simulator/metrics-collector/clean-metrics-collector.sh @@ -2,28 +2,66 @@ # Copyright (c) 2021 Red Hat, Inc. # Copyright Contributors to the Open Cluster Management project -sed_command='sed -i' -managed_cluster='managed' -if [ $# -eq 2 ]; then - managed_cluster=$2 +KUBECTL="kubectl" +if ! command -v kubectl &> /dev/null; then + if command -v oc &> /dev/null; then + KUBECTL="oc" + else + echo "kubectl or oc must be installed!" 
+        exit 1
+    fi
 fi

-if [ $# -lt 1 ]; then
-    echo "this script must be run with the number of clusters:"
-    echo -e "\n$0 total_clusters\n"
-    exit 1
+function usage() {
+    echo "${0} -n NUMBERS [-m MANAGED_CLUSTER_PREFIX]"
+    echo ''
+    # shellcheck disable=SC2016
+    echo '  -n: Specifies the total number of simulated metrics collectors, required'
+    # shellcheck disable=SC2016
+    echo '  -m: Specifies the prefix for the simulated managedcluster name, optional, the default value is "simulated-managed-cluster".'
+    echo ''
+}
+
+MANAGED_CLUSTER_PREFIX="simulated-managed-cluster" # default managedcluster name prefix
+
+# Allow command-line args to override the defaults.
+while getopts ":n:m:h" opt; do
+    case ${opt} in
+    n)
+        NUMBERS=${OPTARG}
+        ;;
+    m)
+        MANAGED_CLUSTER_PREFIX=${OPTARG}
+        ;;
+    h)
+        usage
+        exit 0
+        ;;
+    \?)
+        echo "Invalid option: -${OPTARG}" >&2
+        usage
+        exit 1
+        ;;
+    esac
+done
+
+if [[ -z "${NUMBERS}" ]]; then
+    echo "Error: NUMBERS (-n) must be specified!"
+    usage
+    exit 1
 fi

 re='^[0-9]+$'
-if ! [[ $1 =~ $re ]] ; then
-    echo "error: arguments <$1> not a number" >&2; exit 1
+if ! [[ ${NUMBERS} =~ ${re} ]] ; then
+    echo "error: argument <${NUMBERS}> is not a number" >&2; exit 1
 fi

-for i in $(seq 1 $1)
+for i in $(seq 1 ${NUMBERS})
 do
-    cluster_name=simulate-${managed_cluster}-cluster${i}
-    kubectl delete deploy -n ${cluster_name} metrics-collector-deployment
-    kubectl delete clusterrolebinding ${cluster_name}-clusters-metrics-collector-view
-    kubectl delete -n ${cluster_name} secret/observability-managed-cluster-certs
-    kubectl delete ns ${cluster_name}
+    cluster_name=${MANAGED_CLUSTER_PREFIX}-${i}
+    ${KUBECTL} delete deploy -n ${cluster_name} metrics-collector-deployment
+    ${KUBECTL} delete clusterrolebinding ${cluster_name}-clusters-metrics-collector-view
+    ${KUBECTL} delete -n ${cluster_name} secret/observability-managed-cluster-certs
+    ${KUBECTL} delete ns ${cluster_name}
 done
diff --git a/tools/simulator/metrics-collector/generate-metrics-data.sh b/tools/simulator/metrics-collector/generate-metrics-data.sh
new file mode 100755
index 000000000..46e7513b0
--- /dev/null
+++ b/tools/simulator/metrics-collector/generate-metrics-data.sh
@@ -0,0 +1,144 @@
+#!/bin/bash
+# Copyright (c) 2021 Red Hat, Inc.
+# Copyright Contributors to the Open Cluster Management project
+
+set -eo pipefail
+
+WORK_DIR="$(cd "$(dirname "$0")" ; pwd -P)"
+# Create bin directory and add it to PATH
+mkdir -p ${WORK_DIR}/bin
+export PATH=${WORK_DIR}/bin:${PATH}
+
+# tmp output directory for metrics list
+TMP_OUT=$(mktemp -d /tmp/metrics.XXXXXXXXXX)
+METRICS_JSON_OUT=${TMP_OUT}/metrics.json
+RECORDINGRULES_JSON_OUT=${TMP_OUT}/recordingrules.json
+TIME_SERIES_OUT=${WORK_DIR}/timeseries.txt
+
+METRICS_ALLOW_LIST_URL=${METRICS_ALLOW_LIST_URL:-https://raw.githubusercontent.com/stolostron/multicluster-observability-operator/main/operators/multiclusterobservability/manifests/base/config/metrics_allowlist.yaml}
+METRICS_IMAGE=${METRICS_IMAGE-quay.io/ocm-observability/metrics-data:2.4.0}
+
+if [[ -z "${IS_TIMESERIES_ONLY}" ]]; then
+    # check docker
+    if ! command -v docker &> /dev/null; then
+        echo "docker must be installed to run this script."
+        exit 1
+    fi
+fi
+
+# install kubectl
+KUBECTL="kubectl"
+if !
command -v kubectl &> /dev/null; then + if command -v oc &> /dev/null; then + KUBECTL="oc" + else + if [[ "$(uname)" == "Linux" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + elif [[ "$(uname)" == "Darwin" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl + fi + chmod +x ./kubectl && mv ./kubectl ${WORK_DIR}/bin/kubectl + fi +fi + +# install jq +if ! command -v jq &> /dev/null; then + if [[ "$(uname)" == "Linux" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + elif [[ "$(uname)" == "Darwin" ]]; then + curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 + fi + chmod +x ./jq + chmod +x ./jq && mv ./jq ${WORK_DIR}/bin/jq +fi + +# install gojsontoyaml +GOBIN=${WORK_DIR}/bin go get github.com/brancz/gojsontoyaml +GOJSONTOYAML_BIN=${WORK_DIR}/bin/gojsontoyaml + +function get_metrics_list() { + echo "getting metrics list..." + if [[ -z "${IS_GENERATING_OCP311_METRICS}" ]]; then + matches=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"') + names=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"') + echo $matches $names | jq -s . > ${METRICS_JSON_OUT} + else + matches=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.matches' | jq '"{" + .[] + "}"') + names=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.names' | jq '"{__name__=\"" + .[] + "\"}"') + echo $matches $names | jq -s . > ${METRICS_JSON_OUT} + fi +} + +function get_recordingrules_list() { + echo "getting recordingrules list..." + if [[ -z "${IS_GENERATING_OCP311_METRICS}" ]]; then + recordingrules=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq '.rules[]') + echo "$recordingrules" | jq -s . > ${RECORDINGRULES_JSON_OUT} + else + recordingrules=$(curl -L ${METRICS_ALLOW_LIST_URL} | ${GOJSONTOYAML_BIN} --yamltojson | jq -r '.data."ocp311_metrics_list.yaml"' | ${GOJSONTOYAML_BIN} --yamltojson | jq '.rules[]') + echo "$recordingrules" | jq -s . > ${RECORDINGRULES_JSON_OUT} + fi +} + +function generate_metrics() { + echo "generating metrics..." + federate="curl --fail --silent -G http://localhost:9090/federate" + for rule in $(cat ${METRICS_JSON_OUT} | jq -r '.[]'); + do + federate="${federate} $(printf -- "--data-urlencode match[]=%s" ${rule})" + done + echo '# Beggining for metrics' > ${TIME_SERIES_OUT} + ${federate} >> ${TIME_SERIES_OUT} +} + +function generate_recordingrules() { + echo "generating recordingrules..." 
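+    # For each recording rule in ${RECORDINGRULES_JSON_OUT}, evaluate its "expr" against the
+    # port-forwarded Prometheus /api/v1/query endpoint and append each result to
+    # ${TIME_SERIES_OUT} as an untyped series named after the rule's "record" field,
+    # with the sample timestamps converted to milliseconds.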
+ query="curl --fail --silent -G http://localhost:9090/api/v1/query" + cat ${RECORDINGRULES_JSON_OUT} | jq -cr '.[]' | while read item; + do + record=$(jq -r '.record' <<< "$item") + expr=$(jq -r '.expr' <<< "$item") + #expr=${expr//\"/\\\"} + expr=$(echo "${expr}" | tr -d " ") + querycmd="${query} $(printf -- "--data-urlencode query=%s" ${expr})" + echo -e "\n# TYPE ${record} untyped" >> ${TIME_SERIES_OUT} + ${querycmd} | jq -r '.data.result' | jq -cr '.[]' | while read result; + do + vec="${record}" + metric=$(jq -r '.metric | to_entries | map("\(.key)=\"\(.value | tostring)\"") | .[]' <<< "$result") + metric=$(echo "${metric}" | sed ':a;N;$!ba;s/\n/,/g') + vec="${vec}{${metric}}" + timestamp=$(jq -r '.value[0]' <<< "$result") + value=$(jq -r '.value[1]' <<< "$result") + timestamp=$(echo "${timestamp} * 1000" | bc) + timestamp=${timestamp%.*} + echo "${vec} ${value} ${timestamp}" >> ${TIME_SERIES_OUT} + done + done +} + +function generate_timeseries() { + ${KUBECTL} port-forward -n openshift-monitoring prometheus-k8s-0 9090 > /dev/null & + sleep 10 + generate_metrics + generate_recordingrules + jobs -p | xargs -r kill +} + +function build_metrics_data_image() { + docker build -t ${METRICS_IMAGE} . +} + +function push_metrics_data_image() { + docker push ${METRICS_IMAGE} +} + +get_metrics_list +get_recordingrules_list +generate_timeseries +if [[ -z "${IS_TIMESERIES_ONLY}" ]]; then + build_metrics_data_image + push_metrics_data_image +fi diff --git a/tools/simulator/metrics-collector/metrics-collector-view.yaml b/tools/simulator/metrics-collector/metrics-collector-view.yaml index c28cf7314..2a53d39a7 100644 --- a/tools/simulator/metrics-collector/metrics-collector-view.yaml +++ b/tools/simulator/metrics-collector/metrics-collector-view.yaml @@ -11,4 +11,4 @@ subjects: roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: cluster-monitoring-view \ No newline at end of file + name: cluster-monitoring-view diff --git a/tools/simulator/metrics-collector/setup-metrics-collector.sh b/tools/simulator/metrics-collector/setup-metrics-collector.sh index f683e9d0b..9b0129137 100755 --- a/tools/simulator/metrics-collector/setup-metrics-collector.sh +++ b/tools/simulator/metrics-collector/setup-metrics-collector.sh @@ -2,9 +2,10 @@ # Copyright (c) 2021 Red Hat, Inc. # Copyright Contributors to the Open Cluster Management project -METRICS_IMAGE="${METRICS_IMAGE:-quay.io/ocm-observability/metrics-data:2.4.0}" -WORKDIR="$(pwd -P)" -export PATH=${PATH}:${WORKDIR} +WORK_DIR="$(cd "$(dirname "$0")" ; pwd -P)" +# Create bin directory and add it to PATH +mkdir -p ${WORK_DIR}/bin +export PATH=${PATH}:${WORK_DIR}/bin if ! command -v jq &> /dev/null; then if [[ "$(uname)" == "Linux" ]]; then @@ -13,85 +14,145 @@ if ! command -v jq &> /dev/null; then curl -o jq -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 fi chmod +x ./jq + chmod +x ./jq && mv ./jq ${WORK_DIR}/bin/jq fi -sed_command='sed -i' -if [[ "$(uname)" == "Darwin" ]]; then - sed_command='sed -i -e' +KUBECTL="kubectl" +if ! 
command -v kubectl &> /dev/null; then + if command -v oc &> /dev/null; then + KUBECTL="oc" + else + echo "This script will install kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your machine" + if [[ "$(uname)" == "Linux" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + elif [[ "$(uname)" == "Darwin" ]]; then + curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/darwin/amd64/kubectl + fi + chmod +x ./kubectl && mv ./kubectl ${WORK_DIR}/bin/kubectl + fi fi -managed_cluster='managed' -if [ $# -eq 3 ]; then - managed_cluster=$3 +SED_COMMAND='sed -i' +if [[ "$(uname)" == "Darwin" ]]; then + SED_COMMAND='sed -i -e' fi -if [ $# -lt 1 ]; then - echo "this script must be run with the number of metrics-collector:" - echo -e "\n$0 total_collectors\n" - exit 1 +function usage() { + echo "${0} -n NUMBERS [-t METRICS_DATA_TYPE] [-w WORKERS] [-m MANAGED_CLUSTER_PREFIX]" + echo '' + # shellcheck disable=SC2016 + echo ' -n: Specifies the total number of simulated metrics collectors, required' + # shellcheck disable=SC2016 + echo ' -t: Specifies the data type of metrics data source, the default value is "NON_SNO", it also can be "SNO".' + # shellcheck disable=SC2016 + echo ' -w: Specifies the worker threads for each simulated metrics collector, optional, the default value is "1".' + # shellcheck disable=SC2016 + echo ' -m: Specifies the prefix for the simulated managedcluster name, optional, the default value is "simulated-managed-cluster".' + echo '' +} + +WORKERS=1 # default worker threads for each simulated metrics collector +METRICS_DATA_TYPE="NON_SNO" # default metrics data source type +MANAGED_CLUSTER_PREFIX="simulated-managed-cluster" # default managedccluster name prefix + +# Allow command-line args to override the defaults. +while getopts ":n:t:w:m:h" opt; do + case ${opt} in + n) + NUMBERS=${OPTARG} + ;; + t) + METRICS_DATA_TYPE=${OPTARG} + ;; + w) + WORKERS=${OPTARG} + ;; + m) + MANAGED_CLUSTER_PREFIX=${OPTARG} + ;; + h) + usage + exit 0 + ;; + \?) + echo "Invalid option: -${OPTARG}" >&2 + usage + exit 1 + ;; + esac +done + +if [[ -z "${NUMBERS}" ]]; then + echo "Error: NUMBERS (-n) must be specified!" + usage + exit 1 fi re='^[0-9]+$' -if ! [[ $1 =~ $re ]] ; then - echo "error: arguments <$1> not a number" >&2; exit 1 +if ! [[ ${NUMBERS} =~ ${re} ]] ; then + echo "error: arguments <${NUMBERS}> is not a number" >&2; exit 1 fi -workers=1 -if [ $# -gt 2 ]; then - workers=$2 +if [[ ${METRICS_DATA_TYPE} != "SNO" && ${METRICS_DATA_TYPE} != "NON_SNO" ]] ; then + echo "error: arguments <${METRICS_DATA_TYPE}> is not valid, it must be 'SNO' of 'NON_SNO'" >&2; exit 1 fi -for i in $(seq 1 $1) +if ! 
[[ ${WORKERS} =~ ${re} ]] ; then + echo "error: arguments <${WORKERS}> is not a number" >&2; exit 1 +fi + +OBSERVABILITY_NS="open-cluster-management-addon-observability" + +# metrics data source image +DEFAULT_METRICS_IMAGE="quay.io/ocm-observability/metrics-data:2.4.0" +if [[ ${METRICS_DATA_TYPE} == "SNO" ]] ; then + DEFAULT_METRICS_IMAGE="quay.io/ocm-observability/metrics-data:2.4.0-sno" +fi +METRICS_IMAGE="${METRICS_IMAGE:-$DEFAULT_METRICS_IMAGE}" + +for i in $(seq 1 ${NUMBERS}) do - cluster_name=simulate-${managed_cluster}-cluster${i} - kubectl create ns ${cluster_name} + cluster_name=${MANAGED_CLUSTER_PREFIX}-${i} + ${KUBECTL} create ns ${cluster_name} # create ca/sa/rolebinding for metrics collector - kubectl get configmap metrics-collector-serving-certs-ca-bundle -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl get secret observability-managed-cluster-certs -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl get sa endpoint-observability-operator-sa -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl -n ${cluster_name} patch secret observability-managed-cluster-certs --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' - kubectl -n ${cluster_name} patch sa endpoint-observability-operator-sa --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + ${KUBECTL} get configmap metrics-collector-serving-certs-ca-bundle -n ${OBSERVABILITY_NS} -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | ${KUBECTL} apply -n ${cluster_name} -f - + ${KUBECTL} get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -n ${OBSERVABILITY_NS} -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | ${KUBECTL} apply -n ${cluster_name} -f - + ${KUBECTL} get secret observability-managed-cluster-certs -n ${OBSERVABILITY_NS} -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | ${KUBECTL} apply -n ${cluster_name} -f - + ${KUBECTL} get sa endpoint-observability-operator-sa -n ${OBSERVABILITY_NS} -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | ${KUBECTL} apply -n ${cluster_name} -f - + ${KUBECTL} -n ${cluster_name} patch secret observability-managed-cluster-certs --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + ${KUBECTL} -n ${cluster_name} patch sa endpoint-observability-operator-sa --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' # deploy metrics collector deployment to cluster ns 
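+  # Explanatory sketch (comments only, values are illustrative): the hub's real
+  # metrics-collector Deployment is exported as JSON and cloned into the simulated
+  # cluster namespace. The jq edits below rewrite .metadata.namespace and append
+  # extra flags to the collector command, so for a cluster named, say,
+  # simulated-managed-cluster-1 the container ends up running with arguments like
+  #   --label="cluster=simulated-managed-cluster-1"
+  #   --label="clusterID=<random uuid>"
+  #   --simulated-timeseries-file=/metrics-volume/timeseries.txt
+  #   --worker-number=1
+  # An initContainer then copies timeseries.txt from ${METRICS_IMAGE} into an
+  # emptyDir volume mounted at /metrics-volume, so the simulated collector replays
+  # canned metrics data instead of scraping a live Prometheus endpoint.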
deploy_yaml_file=${cluster_name}-metrics-collector-deployment.json - kubectl get deploy metrics-collector-deployment -n open-cluster-management-addon-observability -o json > $deploy_yaml_file + ${KUBECTL} get deploy metrics-collector-deployment -n ${OBSERVABILITY_NS} -o json > ${deploy_yaml_file} # replace namespace, cluster and clusterID. Insert --simulated-timeseries-file - uuid=$(cat /proc/sys/kernel/random/uuid) - jq \ - --arg cluster_name $cluster_name \ - --arg cluster "--label=\"cluster=$cluster_name\"" \ - --arg clusterID "--label=\"clusterID=$uuid\"" \ - --arg workerNum "--worker-number=$workers" \ - --arg file "--simulated-timeseries-file=/metrics-volume/timeseries.txt" \ - '.metadata.namespace=$cluster_name | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $cluster |.spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $clusterID | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $file | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $workerNum' $deploy_yaml_file > $deploy_yaml_file.tmp && mv $deploy_yaml_file.tmp $deploy_yaml_file + uuid=$(cat /proc/sys/kernel/random/uuid) + jq \ + --arg cluster_name ${cluster_name} \ + --arg cluster "--label=\"cluster=${cluster_name}\"" \ + --arg clusterID "--label=\"clusterID=${uuid}\"" \ + --arg workerNum "--worker-number=${WORKERS}" \ + --arg file "--simulated-timeseries-file=/metrics-volume/timeseries.txt" \ + '.metadata.namespace=$cluster_name | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $cluster |.spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $clusterID | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . + $file | .spec.template.spec.containers[0].command[.spec.template.spec.containers[0].command|length] |= . 
+ $workerNum' ${deploy_yaml_file} > ${deploy_yaml_file}.tmp && mv ${deploy_yaml_file}.tmp ${deploy_yaml_file} # insert metrics initContainer - jq \ - --argjson init '{"initContainers": [{"command":["sh","-c","cp /tmp/timeseries.txt /metrics-volume"],"image":"'$METRICS_IMAGE'","imagePullPolicy":"Always","name":"init-metrics","volumeMounts":[{"mountPath":"/metrics-volume","name":"metrics-volume"}]}]}' \ + jq \ + --argjson init '{"initContainers": [{"command":["sh","-c","cp /tmp/timeseries.txt /metrics-volume"],"image":"'${METRICS_IMAGE}'","imagePullPolicy":"IfNotPresent","name":"init-metrics","volumeMounts":[{"mountPath":"/metrics-volume","name":"metrics-volume"}]}]}' \ --argjson emptydir '{"emptyDir": {}, "name": "metrics-volume"}' \ --argjson metricsdir '{"mountPath": "/metrics-volume","name": "metrics-volume"}' \ - '.spec.template.spec += $init | .spec.template.spec.volumes += [$emptydir] | .spec.template.spec.containers[0].volumeMounts += [$metricsdir]' $deploy_yaml_file > $deploy_yaml_file.tmp && mv $deploy_yaml_file.tmp $deploy_yaml_file - - if [ "$ALLOW_SCHEDULED_TO_MASTER" == "true" ]; then - # insert tolerations - jq \ - --argjson tolerations '{"tolerations": [{"key":"node-role.kubernetes.io/master","operator":"Exists","effect":"NoSchedule"}]}' \ - '.spec.template.spec += $tolerations' $deploy_yaml_file > $deploy_yaml_file.tmp && mv $deploy_yaml_file.tmp $deploy_yaml_file - fi + '.spec.template.spec += $init | .spec.template.spec.volumes += [$emptydir] | .spec.template.spec.containers[0].volumeMounts += [$metricsdir]' ${deploy_yaml_file} > ${deploy_yaml_file}.tmp && mv ${deploy_yaml_file}.tmp ${deploy_yaml_file} - cat "$deploy_yaml_file" | kubectl -n ${cluster_name} apply -f - - rm -rf "$deploy_yaml_file" "$deploy_yaml_file".tmp - kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' - kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' + cat "${deploy_yaml_file}" | ${KUBECTL} -n ${cluster_name} apply -f - + rm -f "${deploy_yaml_file}" "${deploy_yaml_file}".tmp + ${KUBECTL} -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' + ${KUBECTL} -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' # deploy ClusterRoleBinding for read metrics from OCP prometheus rolebinding_yaml_file=${cluster_name}-metrics-collector-view.yaml cp -rf metrics-collector-view.yaml "$rolebinding_yaml_file" - $sed_command "s~__CLUSTER_NAME__~${cluster_name}~g" "$rolebinding_yaml_file" - cat "$rolebinding_yaml_file" | kubectl -n ${cluster_name} apply -f - - rm -rf "$rolebinding_yaml_file" - + ${SED_COMMAND} "s~__CLUSTER_NAME__~${cluster_name}~g" "${rolebinding_yaml_file}" + cat "${rolebinding_yaml_file}" | ${KUBECTL} -n ${cluster_name} apply -f - + rm -f "${rolebinding_yaml_file}" done From 829bc7f7a580e0dfc985f17459edd1b0a27ed40e Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 31 Mar 2022 17:15:49 +0800 Subject: [PATCH 013/150] fix disabled cr to checkaddon issue in 2.5 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 27 +++--- tests/pkg/tests/results.xml.arm | 98 +++++++++++++++++++++ tests/pkg/tests/results.xml.fips | 94 ++++++++++++++++++++ 
tests/pkg/tests/results.xml.rhv | 59 +++++++++++++ tests/pkg/tests/results.xml.sno | 90 +++++++++++++++++++ 5 files changed, 356 insertions(+), 12 deletions(-) create mode 100644 tests/pkg/tests/results.xml.arm create mode 100644 tests/pkg/tests/results.xml.fips create mode 100644 tests/pkg/tests/results.xml.rhv create mode 100644 tests/pkg/tests/results.xml.sno diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index f5b4be885..f0a34c01d 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -116,10 +116,10 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { - Eventually(func() error { - return utils.ModifyMCOAddonSpecMetrics(testOptions, true) - }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { + Eventually(func() error { + return utils.ModifyMCOAddonSpecMetrics(testOptions, true) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) By("Waiting for MCO addon components ready") Eventually(func() bool { @@ -176,14 +176,17 @@ var _ = Describe("", func() { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) - By("Waiting for MCO addon components scales to 0") - Eventually(func() bool { - err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) - if err == nil && obaNS == nil { - return true - } - return false - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + klog.V(1).Infof("managedcluster number is <%d>", len(testOptions.ManagedClusters)) + if len(testOptions.ManagedClusters) > 0 { + By("Waiting for MCO addon components scales to 0") + Eventually(func() bool { + err, obaNS := utils.GetNamespace(testOptions, false, MCO_ADDON_NAMESPACE) + if err == nil && obaNS == nil { + return true + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + } }) It("[Stable] Remove disable observability label from the managed cluster", func() { diff --git a/tests/pkg/tests/results.xml.arm b/tests/pkg/tests/results.xml.arm new file mode 100644 index 000000000..8e80217de --- /dev/null +++ b/tests/pkg/tests/results.xml.arm @@ -0,0 +1,98 @@ + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:33 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:35 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:47 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:49 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:155 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:165 + + + 
/Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:192 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:200 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:237 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:247 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:41 Unexpected error: <*errors.errorString | 0xc00059e610>: { s: "the MCO CR did not have observabilityAddonSpec.resources spec configed", } the MCO CR did not have observabilityAddonSpec.resources spec configed occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:44 + �[1mSTEP�[0m: Check addon resource requirement + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:53 Timed out after 300.000s. Expected success, but got an error: <*errors.errorString | 0xc0006414f0>: { s: "metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}>", } metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:57 + �[1mSTEP�[0m: Check metrics-collector resource requirement + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:119 Timed out after 1200.001s. Expected success, but got an error: <*errors.StatusError | 0xc0002d68c0>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "observabilityaddons.observability.open-cluster-management.io \"observability-addon\" not found", Reason: "NotFound", Details: { Name: "observability-addon", Group: "observability.open-cluster-management.io", Kind: "observabilityaddons", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } observabilityaddons.observability.open-cluster-management.io "observability-addon" not found /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:145 + �[1mSTEP�[0m: Waiting for MCO addon components ready �[1mSTEP�[0m: Checking the status in managedclusteraddon reflects the endpoint operator status correctly + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:174 Timed out after 300.002s. Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:186 + �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 + + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_alert_test.go:278 Timed out after 300.001s. 
Expected success, but got an error: <*errors.errorString | 0xc000631260>: { s: "Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster", } Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_alert_test.go:377 + �[1mSTEP�[0m: Checking Watchdog alerts are forwarded to the hub + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_test.go:28 Timed out after 360.068s. Expected success, but got an error: <*errors.errorString | 0xc000be8a70>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_test.go:46 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:51 Timed out after 600.000s. Expected success, but got an error: <*errors.errorString | 0xc000073590>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:77 + �[1mSTEP�[0m: Adding custom metrics allowlist configmap �[1mSTEP�[0m: Waiting for new added metrics on grafana console + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:21 Unexpected error: <*exec.ExitError | 0xc00026d020>: { ProcessState: { pid: 62559, status: 256, rusage: { Utime: { Sec: 0, Usec: 437719, Pad_cgo_0: [0, 0, 0, 0], }, Stime: { Sec: 0, Usec: 195890, Pad_cgo_0: [0, 0, 0, 0], }, Maxrss: 35835904, Ixrss: 0, Idrss: 0, Isrss: 0, Minflt: 36535, Majflt: 3161, Nswap: 0, Inblock: 0, Oublock: 0, Msgsnd: 0, Msgrcv: 0, Nsignals: 202, Nvcsw: 92, Nivcsw: 5849, }, }, Stderr: nil, } exit status 1 occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:27 + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:43 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:51 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:99 Skip the case since the MCO CR did not set the nodeSelector /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:107 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:197 Unexpected error: <*errors.errorString | 0xc0006a74f0>: { s: "statefulset observability-alertmanager should have 3 but got 0 ready replicas", } statefulset observability-alertmanager should have 3 but got 0 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:198 + �[1mSTEP�[0m: Checking podAntiAffinity for all pods + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:197 Unexpected error: <*errors.errorString | 0xc0005144b0>: { s: "statefulset observability-alertmanager should have 3 but got 0 ready replicas", } statefulset observability-alertmanager should have 3 but got 0 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:198 + �[1mSTEP�[0m: Resizing alertmanager storage + + + 
/Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:147 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:150 + + \ No newline at end of file diff --git a/tests/pkg/tests/results.xml.fips b/tests/pkg/tests/results.xml.fips new file mode 100644 index 000000000..4d1441949 --- /dev/null +++ b/tests/pkg/tests/results.xml.fips @@ -0,0 +1,94 @@ + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:51 Timed out after 636.857s. Expected success, but got an error: <*errors.errorString | 0xc0005d00b0>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:77 + �[1mSTEP�[0m: Adding custom metrics allowlist configmap �[1mSTEP�[0m: Waiting for new added metrics on grafana console + + + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:60 Timed out after 60.001s. Expected success, but got an error: <*errors.StatusError | 0xc0005403c0>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "Internal error occurred: failed calling webhook \"vmulticlusterobservability.observability.open-cluster-management.io\": Post \"https://multicluster-observability-webhook-service.ocm.svc:443/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability?timeout=10s\": no endpoints available for service \"multicluster-observability-webhook-service\"", Reason: "InternalError", Details: { Name: "", Group: "", Kind: "", UID: "", Causes: [ { Type: "", Message: "failed calling webhook \"vmulticlusterobservability.observability.open-cluster-management.io\": Post \"https://multicluster-observability-webhook-service.ocm.svc:443/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability?timeout=10s\": no endpoints available for service \"multicluster-observability-webhook-service\"", Field: "", }, ], RetryAfterSeconds: 0, }, Code: 500, }, } Internal error occurred: failed calling webhook "vmulticlusterobservability.observability.open-cluster-management.io": Post "https://multicluster-observability-webhook-service.ocm.svc:443/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability?timeout=10s": no endpoints available for service "multicluster-observability-webhook-service" /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:63 + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:174 Timed out after 300.000s. 
Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:186 + �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:21 Unexpected error: <*exec.ExitError | 0xc00098d760>: { ProcessState: { pid: 64306, status: 256, rusage: { Utime: { Sec: 0, Usec: 373516, Pad_cgo_0: [0, 0, 0, 0], }, Stime: { Sec: 0, Usec: 148181, Pad_cgo_0: [0, 0, 0, 0], }, Maxrss: 35418112, Ixrss: 0, Idrss: 0, Isrss: 0, Minflt: 36214, Majflt: 3139, Nswap: 0, Inblock: 0, Oublock: 0, Msgsnd: 0, Msgrcv: 0, Nsignals: 162, Nvcsw: 57, Nivcsw: 3449, }, }, Stderr: nil, } exit status 1 occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:27 + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:131 Timed out after 300.001s. Expected success, but got an error: <*errors.errorString | 0xc000619a60>: { s: "the storage size of statefulset observability-alertmanager should have 2Gi but got {{1073741824 0} {<nil>} 1Gi BinarySI}", } the storage size of statefulset observability-alertmanager should have 2Gi but got {{1073741824 0} {<nil>} 1Gi BinarySI} /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:144 + �[1mSTEP�[0m: Resizing alertmanager storage + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:147 Unexpected error: <*errors.StatusError | 0xc0007f0280>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "Internal error occurred: failed calling webhook \"vmulticlusterobservability.observability.open-cluster-management.io\": Post \"https://multicluster-observability-webhook-service.ocm.svc:443/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability?timeout=10s\": no endpoints available for service \"multicluster-observability-webhook-service\"", Reason: "InternalError", Details: { Name: "", Group: "", Kind: "", UID: "", Causes: [ { Type: "", Message: "failed calling webhook \"vmulticlusterobservability.observability.open-cluster-management.io\": Post \"https://multicluster-observability-webhook-service.ocm.svc:443/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability?timeout=10s\": no endpoints available for service \"multicluster-observability-webhook-service\"", Field: "", }, ], RetryAfterSeconds: 0, }, Code: 500, }, } Internal error occurred: failed calling webhook "vmulticlusterobservability.observability.open-cluster-management.io": Post "https://multicluster-observability-webhook-service.ocm.svc:443/validate-observability-open-cluster-management-io-v1beta2-multiclusterobservability?timeout=10s": no endpoints available for service "multicluster-observability-webhook-service" occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:154 + �[1mSTEP�[0m: Revert MCO CR changes + + + + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_alert_test.go:278 Timed out after 300.000s. 
Expected success, but got an error: <*errors.errorString | 0xc000348860>: { s: "Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster", } Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_alert_test.go:377 + �[1mSTEP�[0m: Checking Watchdog alerts are forwarded to the hub + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:33 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:35 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:47 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:49 + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:237 Test Panicked /Users/cqu/go/src/runtime/iface.go:261 Panic: interface conversion: interface {} is nil, not map[string]interface {} Full stack: github.com/stolostron/multicluster-observability-operator/tests/pkg/tests.glob..func6.6() /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:254 +0x7bc github.com/onsi/ginkgo/internal/leafnodes.(*runner).runSync(0x0) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/leafnodes/runner.go:113 +0xba github.com/onsi/ginkgo/internal/leafnodes.(*runner).run(0x0) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/leafnodes/runner.go:64 +0x125 github.com/onsi/ginkgo/internal/leafnodes.(*ItNode).Run(0xc000a3a270) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/leafnodes/it_node.go:26 +0x7b github.com/onsi/ginkgo/internal/spec.(*Spec).runSample(0xc000365770, 0xc000c779f0, {0x5804400, 0xc0000a0900}) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/spec/spec.go:215 +0x2a9 github.com/onsi/ginkgo/internal/spec.(*Spec).Run(0xc000365770, {0x5804400, 0xc0000a0900}) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/spec/spec.go:138 +0xe7 github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpec(0xc0003cd080, 0xc000365770) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/specrunner/spec_runner.go:200 +0xe5 github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).runSpecs(0xc0003cd080) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/specrunner/spec_runner.go:170 +0x1a5 github.com/onsi/ginkgo/internal/specrunner.(*SpecRunner).Run(0xc0003cd080) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/specrunner/spec_runner.go:66 +0xc5 github.com/onsi/ginkgo/internal/suite.(*Suite).Run(0xc0000c6cb0, {0x91b22f0, 0xc00037d1e0}, {0x55e738d, 0x1}, {0xc0001cb7a0, 0x2, 0x2}, {0x58527d8, 0xc0000a0900}, ...) 
/Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/internal/suite/suite.go:79 +0x4d2 github.com/onsi/ginkgo.runSpecsWithCustomReporters({0x5805de0, 0xc00037d1e0}, {0x55e738d, 0x17}, {0xc0001cb780, 0x2, 0x55e97da}) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/ginkgo_dsl.go:238 +0x185 github.com/onsi/ginkgo.RunSpecsWithDefaultAndCustomReporters({0x5805de0, 0xc00037d1e0}, {0x55e738d, 0x17}, {0xc00005d750, 0x1, 0x1}) /Users/cqu/gopath/pkg/mod/github.com/onsi/ginkgo@v1.16.4/ginkgo_dsl.go:221 +0x1be github.com/stolostron/multicluster-observability-operator/tests/pkg/tests.TestObservabilityE2E(0x0) /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability-e2e-test_suite_test.go:129 +0x10a testing.tRunner(0xc00037d1e0, 0x56a6368) /Users/cqu/go/src/testing/testing.go:1259 +0x102 created by testing.(*T).Run /Users/cqu/go/src/testing/testing.go:1306 +0x35a + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_certrenew_test.go:30 Timed out after 371.950s. Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_certrenew_test.go:138 + �[1mSTEP�[0m: Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment �[1mSTEP�[0m: Deleting certificate secret to simulate certificate renew �[1mSTEP�[0m: Waiting for old pods removed: [observability-observatorium-api-59cc57bc7-76tln observability-observatorium-api-59cc57bc7-cgxgg observability-observatorium-api-59cc57bc7-gchbx observability-rbac-query-proxy-7f5c96596f-ccn9j observability-rbac-query-proxy-7f5c96596f-lm9bv observability-rbac-query-proxy-7f5c96596f-whnfv] and new pods created + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_observatorium_preserve_test.go:30 Timed out after 180.001s. 
Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_observatorium_preserve_test.go:69 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:198 Unexpected error: <*errors.errorString | 0xc000a628b0>: { s: "deployment observability-observatorium-api should have 3 but got 2 ready replicas", } deployment observability-observatorium-api should have 3 but got 2 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:199 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:198 Unexpected error: <*errors.errorString | 0xc0008cec00>: { s: "deployment observability-observatorium-api should have 3 but got 2 ready replicas", } deployment observability-observatorium-api should have 3 but got 2 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:199 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:198 Unexpected error: <*errors.errorString | 0xc00054ca70>: { s: "deployment observability-observatorium-api should have 3 but got 2 ready replicas", } deployment observability-observatorium-api should have 3 but got 2 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:199 + �[1mSTEP�[0m: Deleting metrics-collector-view clusterolebinding �[1mSTEP�[0m: Updating metrics-collector-view clusterolebinding + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:198 Unexpected error: <*errors.errorString | 0xc0005d06c0>: { s: "deployment observability-observatorium-api should have 3 but got 2 ready replicas", } deployment observability-observatorium-api should have 3 but got 2 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_endpoint_preserve_test.go:199 + �[1mSTEP�[0m: Deleting metrics-collector-serving-certs-ca-bundle configmap + + \ No newline at end of file diff --git a/tests/pkg/tests/results.xml.rhv b/tests/pkg/tests/results.xml.rhv new file mode 100644 index 000000000..3c0e71480 --- /dev/null +++ b/tests/pkg/tests/results.xml.rhv @@ -0,0 +1,59 @@ + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:174 Timed out after 300.000s. 
Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:186 + �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:21 Unexpected error: <*exec.ExitError | 0xc003547d00>: { ProcessState: { pid: 13278, status: 256, rusage: { Utime: { Sec: 0, Usec: 346718, Pad_cgo_0: [0, 0, 0, 0], }, Stime: { Sec: 0, Usec: 123883, Pad_cgo_0: [0, 0, 0, 0], }, Maxrss: 35676160, Ixrss: 0, Idrss: 0, Isrss: 0, Minflt: 36073, Majflt: 3140, Nswap: 0, Inblock: 0, Oublock: 0, Msgsnd: 0, Msgrcv: 0, Nsignals: 171, Nvcsw: 70, Nivcsw: 3466, }, }, Stderr: nil, } exit status 1 occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:27 + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/pkg/tests/results.xml.sno b/tests/pkg/tests/results.xml.sno new file mode 100644 index 000000000..bbc65cb64 --- /dev/null +++ b/tests/pkg/tests/results.xml.sno @@ -0,0 +1,90 @@ + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:41 Unexpected error: <*errors.errorString | 0xc00082a7d0>: { s: "the MCO CR did not have observabilityAddonSpec.resources spec configed", } the MCO CR did not have observabilityAddonSpec.resources spec configed occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:44 + �[1mSTEP�[0m: Check addon resource requirement + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:53 Timed out after 300.000s. Expected success, but got an error: <*errors.errorString | 0xc0009d0480>: { s: "metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}>", } metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:57 + �[1mSTEP�[0m: Check metrics-collector resource requirement + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:174 Timed out after 300.001s. 
Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:186 + �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_observatorium_preserve_test.go:103 Unexpected error: <*errors.errorString | 0xc0006502e0>: { s: "statefulset observability-thanos-compact should have 1 but got 0 ready replicas", } statefulset observability-thanos-compact should have 1 but got 0 ready replicas occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_observatorium_preserve_test.go:104 + �[1mSTEP�[0m: Wait for thanos compact pods are ready + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:33 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:35 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:47 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:49 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:155 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:165 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:192 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:200 + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:237 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:247 + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:142 Timed out after 121.190s. 
Expected success, but got an error: <*errors.errorString | 0xc00078c380>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:162 + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:43 Unexpected error: <*errors.StatusError | 0xc000978460>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "admission webhook \"vmulticlusterobservability.observability.open-cluster-management.io\" denied the request: MultiClusterObservability.observability.open-cluster-management.io \"observability\" is invalid: spec.storageConfig.alertmanagerStorageSize: Forbidden: is forbidden to update.", Reason: "Invalid", Details: { Name: "observability", Group: "observability.open-cluster-management.io", Kind: "MultiClusterObservability", UID: "", Causes: [ { Type: "FieldValueForbidden", Message: "Forbidden: is forbidden to update.", Field: "spec.storageConfig.alertmanagerStorageSize", }, ], RetryAfterSeconds: 0, }, Code: 422, }, } admission webhook "vmulticlusterobservability.observability.open-cluster-management.io" denied the request: MultiClusterObservability.observability.open-cluster-management.io "observability" is invalid: spec.storageConfig.alertmanagerStorageSize: Forbidden: is forbidden to update. occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:46 + �[1mSTEP�[0m: Modifying MCO CR for reconciling + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:99 Skip the case since the MCO CR did not set the nodeSelector /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:107 + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:131 Timed out after 300.000s. 
Expected success, but got an error: <*errors.errorString | 0xc00082a390>: { s: "the storage size of statefulset observability-alertmanager should have 2Gi but got {{1073741824 0} {<nil>} 1Gi BinarySI}", } the storage size of statefulset observability-alertmanager should have 2Gi but got {{1073741824 0} {<nil>} 1Gi BinarySI} /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:144 + �[1mSTEP�[0m: Resizing alertmanager storage + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:147 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_reconcile_test.go:150 + + + + + + + + + + + + + + + + + + + + /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:21 Unexpected error: <*exec.ExitError | 0xc00069b740>: { ProcessState: { pid: 63722, status: 256, rusage: { Utime: { Sec: 0, Usec: 472569, Pad_cgo_0: [0, 0, 0, 0], }, Stime: { Sec: 0, Usec: 203915, Pad_cgo_0: [0, 0, 0, 0], }, Maxrss: 35209216, Ixrss: 0, Idrss: 0, Isrss: 0, Minflt: 39035, Majflt: 16, Nswap: 0, Inblock: 0, Oublock: 0, Msgsnd: 0, Msgrcv: 0, Nsignals: 844, Nvcsw: 61, Nivcsw: 6457, }, }, Stderr: nil, } exit status 1 occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_dev_test.go:27 + + \ No newline at end of file From 473ccf3a9e0c00b520cf94ce5287175087e1f246 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sat, 2 Apr 2022 14:54:05 +0800 Subject: [PATCH 014/150] update for test case 1418 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 53 +++++++++++---------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index f0a34c01d..6049ff028 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -116,34 +116,35 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { - Eventually(func() error { - return utils.ModifyMCOAddonSpecMetrics(testOptions, true) - }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + }) - By("Waiting for MCO addon components ready") - Eventually(func() bool { - err, podList := utils.GetPodList( - testOptions, - false, - MCO_ADDON_NAMESPACE, - "component=metrics-collector", - ) - if len(podList.Items) == 1 && err == nil { - return true - } - return false - }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { + Eventually(func() error { + return utils.ModifyMCOAddonSpecMetrics(testOptions, true) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) - By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") - Eventually(func() error { - err = utils.CheckAllOBAsEnabled(testOptions) - if err != nil { - return err - } - return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) - }) + By("Waiting for 
MCO addon components ready") + Eventually(func() bool { + err, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) + if len(podList.Items) == 1 && err == nil { + return true + } + return false + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) + + By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable] (addon/g0)", func() { From f4825e6581e698634c488e9288f242514f925837 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 24 Apr 2022 11:43:13 +0800 Subject: [PATCH 015/150] improve the OCP watchdog alert forward Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 4a88e4a55..c57fbd669 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -369,7 +369,8 @@ var _ = Describe("", func() { sort.Strings(clusterIDsInAlerts) sort.Strings(expectClusterIdentifiers) - if !reflect.DeepEqual(clusterIDsInAlerts, expectClusterIdentifiers) { + sort.Strings(expectedOCPClusterIDs) + if !reflect.DeepEqual(clusterIDsInAlerts, expectClusterIdentifiers) && !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) { return fmt.Errorf("Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster") } From 5db970b4d244e8d6a588fa3eb198ff9444fb0109 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 27 Apr 2022 11:21:54 +0800 Subject: [PATCH 016/150] add case for recording and metrics Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_metrics_test.go | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 71a14e6d6..606a98eda 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,6 +48,31 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration] (metrics/g1)", func() { + metricList := utils.GetDefaultMetricList(testOptions) + ignoreMetricMap := utils.GetIgnoreMetricMap() + _, etcdPodList := utils.GetPodList( + testOptions, + true, + "openshift-etcd", + "app=etcd", + ) + // ignore etcd network peer metrics for SNO cluster + if etcdPodList != nil && len(etcdPodList.Items) <= 0 { + ignoreMetricMap["etcd_network_peer_received_bytes_total"] = true + ignoreMetricMap["etcd_network_peer_sent_bytes_total"] = true + } + for _, name := range metricList { + _, ok := ignoreMetricMap[name] + if !ok { + Eventually(func() error { + err, _ := utils.ContainManagedClusterMetric(testOptions, name, []string{name}) + return err + }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) + } + } + }) + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration] 
(metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) From 98a7ddb50a5441404fd2e58dace1cf3ae9dc31ac Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 16 May 2022 15:04:47 +0800 Subject: [PATCH 017/150] custome cert Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_route_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index e7ac27bac..61b6b958f 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("@BVT - [P1][Sev1][observability][Integration] Should access metrics via rbac-query-proxy route (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration] Should access metrics via rbac-query-proxy route (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query From f4644f45f787699fd123fa14628342c7f1e6617b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 16 May 2022 17:15:40 +0800 Subject: [PATCH 018/150] metrics addon Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 29 +++++++++++---------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 6049ff028..2d1172b8c 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -123,20 +123,6 @@ var _ = Describe("", func() { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) - By("Waiting for MCO addon components ready") - Eventually(func() bool { - err, podList := utils.GetPodList( - testOptions, - false, - MCO_ADDON_NAMESPACE, - "component=metrics-collector", - ) - if len(podList.Items) == 1 && err == nil { - return true - } - return false - }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) - By("Checking the status in managedclusteraddon reflects the endpoint operator status correctly") Eventually(func() error { err = utils.CheckAllOBAsEnabled(testOptions) @@ -147,6 +133,21 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { + Eventually(func() error { + return utils.ModifyMCOAddonSpecMetrics(testOptions, true) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + + By("Waiting for MCO addon components ready") + Eventually(func() bool { + err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") + if len(podList.Items) == 1 && err == nil { + return true + } + return false + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to 
values beyond scope [P3][Sev3][Observability][Stable] (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { From a59ef5fcf0171f9512d6d835df39d2cb147040ce Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 16 May 2022 18:31:34 +0800 Subject: [PATCH 019/150] deployment check and local-cluster Signed-off-by: Chang Liang Qu --- .../tests/observability_deployment_test.go | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 tests/pkg/tests/observability_deployment_test.go diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go new file mode 100644 index 000000000..874f9adfd --- /dev/null +++ b/tests/pkg/tests/observability_deployment_test.go @@ -0,0 +1,79 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "context" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { + By("Waiting for MCO ready status") + Eventually(func() error { + err = utils.CheckMCOComponents(testOptions) + if err != nil { + testFailed = true + utils.PrintAllMCOPodsStatus(testOptions) + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*25, EventuallyIntervalSecond*10).Should(Succeed()) + + By("Check clustermanagementaddon CR is created") + Eventually(func() error { + _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + }) + + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { + By("Check endpoint-operator and metrics-collector pods are ready") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*10).Should(Succeed()) + + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + utils.PrintManagedClusterOBAObject(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) + +}) \ No newline at end of file From 834a58cb513d7e39f5b65b62bd5a1f6f036bf884 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 17 May 2022 11:32:55 +0800 Subject: [PATCH 020/150] exclude not working managedcluster into the obs.addon check list Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_deployment_test.go | 2 +- tests/pkg/utils/mco_managedcluster.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 874f9adfd..80c6b578c 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { }) It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { - By("Waiting for MCO ready status") + By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) if err != nil { diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go index e1ef022b7..bd3bf33be 100644 --- a/tests/pkg/utils/mco_managedcluster.go +++ b/tests/pkg/utils/mco_managedcluster.go @@ -56,7 +56,7 @@ func ListManagedClusters(opt TestOptions) ([]string, error) { if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { obsControllerStr = obsController.(string) } - if obsControllerStr != "unreachable" { + if obsControllerStr != "unreachable" && obsControllerStr != "" { clusterNames = append(clusterNames, name) } } From bc4ea175e39c37fc51881841b8cc6a06d708e9a1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 23 May 2022 10:34:55 +0800 Subject: [PATCH 021/150] fix involved issue with no managed cluster error Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_managedcluster.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go index bd3bf33be..071187872 100644 --- a/tests/pkg/utils/mco_managedcluster.go +++ b/tests/pkg/utils/mco_managedcluster.go @@ -52,12 +52,19 @@ func ListManagedClusters(opt TestOptions) ([]string, error) { name := metadata["name"].(string) labels := metadata["labels"].(map[string]interface{}) if labels != nil { - obsControllerStr := "" - if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { - obsControllerStr = obsController.(string) + workerManagerAddonStr := "" + if workerManagerAddon, ok := labels["feature.open-cluster-management.io/addon-work-manager"]; ok { + workerManagerAddonStr = workerManagerAddon.(string) } - if obsControllerStr != "unreachable" && obsControllerStr != "" { - clusterNames = append(clusterNames, name) + if workerManagerAddonStr != "unreachable" { + obsControllerStr := "" + if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { + obsControllerStr = obsController.(string) + } + klog.V(5).Infof("obsControllerStr is %s\n", obsControllerStr) + if obsControllerStr != "unreachable" { + clusterNames = append(clusterNames, name) + } } } } From b6b3c0e1a1b3b3b8cb10cad42e6c4d11dbd5be30 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 23 May 2022 11:01:37 +0800 Subject: [PATCH 022/150] fix log error Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_managedcluster.go | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go index 071187872..c989e30b6 100644 --- a/tests/pkg/utils/mco_managedcluster.go +++ b/tests/pkg/utils/mco_managedcluster.go @@ -61,7 +61,6 @@ func ListManagedClusters(opt TestOptions) ([]string, error) { if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { obsControllerStr = obsController.(string) } - klog.V(5).Infof("obsControllerStr is %s\n", 
obsControllerStr) if obsControllerStr != "unreachable" { clusterNames = append(clusterNames, name) } From 8a381edd51478ffc4bfc8db6fc3668159c98d63b Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Mon, 23 May 2022 11:04:30 +0800 Subject: [PATCH 023/150] add quchangl-github into approve list --- tests/OWNERS | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/OWNERS b/tests/OWNERS index a21463147..3032a1600 100644 --- a/tests/OWNERS +++ b/tests/OWNERS @@ -1,8 +1,5 @@ approvers: - - morvencao - - songleo + - quchangl-github reviewers: - - clyang82 - marcolan018 - - bjoydeep - - haoqing0110 + From 4ad7ba6a9712e51405381deb06dc4a58473bc003 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sat, 28 May 2022 21:01:04 +0800 Subject: [PATCH 024/150] work managed addon is available Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_managedcluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go index c989e30b6..086cdebbf 100644 --- a/tests/pkg/utils/mco_managedcluster.go +++ b/tests/pkg/utils/mco_managedcluster.go @@ -56,7 +56,7 @@ func ListManagedClusters(opt TestOptions) ([]string, error) { if workerManagerAddon, ok := labels["feature.open-cluster-management.io/addon-work-manager"]; ok { workerManagerAddonStr = workerManagerAddon.(string) } - if workerManagerAddonStr != "unreachable" { + if workerManagerAddonStr == "available" { obsControllerStr := "" if obsController, ok := labels["feature.open-cluster-management.io/addon-observability-controller"]; ok { obsControllerStr = obsController.(string) From 5c318f64bbe6eeb9ad6dbf587162fccfba8780ff Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 5 Jul 2022 19:12:25 +0800 Subject: [PATCH 025/150] update case name for STS Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 18d2ad9f4..a19019f85 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -234,7 +234,7 @@ var _ = Describe("", func() { } }) - It("[P2][Sev2][observability][Integration] Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration] Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) From 0290a0cf0580f35d2edc4d9f23ed41bbe97464c9 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 6 Jul 2022 19:29:04 +0800 Subject: [PATCH 026/150] export metrics out of ACM Signed-off-by: Chang Liang Qu --- cicd-scripts/customize-mco.sh | 4 +- examples/export/custom-metrics-allowlist.yaml | 9 ++ examples/export/endpoints.yaml | 13 ++ examples/export/kustomization.yaml | 4 + .../v1beta2/custom-certs/kustomization.yaml | 2 + .../v1beta2/custom-certs/observability.yaml | 129 ++++++++++++++++++ examples/export/v1beta2/kustomization.yaml | 2 + examples/export/v1beta2/observability.yaml | 127 +++++++++++++++++ examples/export/victoriametrics.yaml | 48 +++++++ tests/pkg/tests/observability_export_test.go | 115 ++++++++++++++++ tests/pkg/utils/mco_export.go | 73 ++++++++++ 11 files changed, 524 insertions(+), 2 deletions(-) create mode 100644 examples/export/custom-metrics-allowlist.yaml create mode 100644 examples/export/endpoints.yaml create mode 100644 examples/export/kustomization.yaml create mode 100644 examples/export/v1beta2/custom-certs/kustomization.yaml create mode 100644 examples/export/v1beta2/custom-certs/observability.yaml create mode 100644 examples/export/v1beta2/kustomization.yaml create mode 100644 examples/export/v1beta2/observability.yaml create mode 100644 examples/export/victoriametrics.yaml create mode 100644 tests/pkg/tests/observability_export_test.go create mode 100644 tests/pkg/utils/mco_export.go diff --git a/cicd-scripts/customize-mco.sh b/cicd-scripts/customize-mco.sh index f0cd905f5..ecccda552 100755 --- a/cicd-scripts/customize-mco.sh +++ b/cicd-scripts/customize-mco.sh @@ -150,11 +150,11 @@ get_ginkgo_focus() { continue fi if [[ $file =~ ^operators/multiclusterobservability ]]; then - GINKGO_FOCUS+=" --focus addon/g0 --focus config/g0 --focus alert/g0 --focus alertforward/g0 --focus certrenew/g0 --focus grafana/g0 --focus grafana_dev/g0 --focus dashboard/g0 --focus manifestwork/g0 --focus metrics/g0 --focus observatorium_preserve/g0 --focus reconcile/g0 --focus retention/g0" + GINKGO_FOCUS+=" --focus addon/g0 --focus config/g0 --focus alert/g0 --focus alertforward/g0 --focus certrenew/g0 --focus grafana/g0 --focus grafana_dev/g0 --focus dashboard/g0 --focus manifestwork/g0 --focus metrics/g0 --focus observatorium_preserve/g0 --focus reconcile/g0 --focus retention/g0 --focus export/g0" continue fi if [[ $file =~ ^operators/pkg ]]; then - GINKGO_FOCUS+=" --focus addon/g0 --focus config/g0 --focus alert/g0 --focus alertforward/g0 --focus certrenew/g0 --focus grafana/g0 --focus grafana_dev/g0 --focus dashboard/g0 --focus manifestwork/g0 --focus metrics/g0 --focus observatorium_preserve/g0 --focus reconcile/g0 --focus retention/g0 --focus endpoint_preserve/g0" + GINKGO_FOCUS+=" --focus addon/g0 --focus config/g0 --focus alert/g0 --focus alertforward/g0 --focus certrenew/g0 --focus grafana/g0 --focus grafana_dev/g0 --focus dashboard/g0 --focus manifestwork/g0 --focus metrics/g0 --focus observatorium_preserve/g0 --focus reconcile/g0 --focus retention/g0 --focus endpoint_preserve/g0 --focus export/g0" continue fi if [[ ${file} =~ ^pkg ]]; then diff --git a/examples/export/custom-metrics-allowlist.yaml b/examples/export/custom-metrics-allowlist.yaml new file mode 100644 index 000000000..a9b9ce245 --- /dev/null +++ b/examples/export/custom-metrics-allowlist.yaml @@ -0,0 +1,9 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: observability-metrics-custom-allowlist + namespace: 
open-cluster-management-observability +data: + metrics_list.yaml: | + names: + - acm_remote_write_requests_total \ No newline at end of file diff --git a/examples/export/endpoints.yaml b/examples/export/endpoints.yaml new file mode 100644 index 000000000..748779407 --- /dev/null +++ b/examples/export/endpoints.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: victoriametrics + namespace: open-cluster-management-observability +type: Opaque +stringData: + ep.yaml: | + url: http://victoriametrics:8428/api/v1/write + http_client_config: + basic_auth: + username: test + password: test \ No newline at end of file diff --git a/examples/export/kustomization.yaml b/examples/export/kustomization.yaml new file mode 100644 index 000000000..2f00885c4 --- /dev/null +++ b/examples/export/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- custom-metrics-allowlist.yaml +- endpoints.yaml +- victoriametrics.yaml diff --git a/examples/export/v1beta2/custom-certs/kustomization.yaml b/examples/export/v1beta2/custom-certs/kustomization.yaml new file mode 100644 index 000000000..f0c8a4650 --- /dev/null +++ b/examples/export/v1beta2/custom-certs/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- observability.yaml diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml new file mode 100644 index 000000000..da76fef20 --- /dev/null +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -0,0 +1,129 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability + annotations: +spec: + advanced: + retentionConfig: + blockDuration: 3h + cleanupInterval: 6m + deleteDelay: 50h + retentionInLocal: 5d + retentionResolution1h: 31d + retentionResolution5m: 15d + retentionResolutionRaw: 6d + observatoriumAPI: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + queryFrontend: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + query: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + compact: + resources: + limits: + cpu: 1 + memory: 2Gi + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + receive: + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + rule: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 1 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + store: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + storeMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 2 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + queryFrontendMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + grafana: + replicas: 3 + resources: + limits: + cpu: 1 + memory: 1Gi + alertmanager: + replicas: 2 + resources: + limits: + cpu: 100m + memory: 400Mi + rbacQueryProxy: + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + interval: 30 + resources: + limits: + cpu: 200m + memory: 700Mi + requests: + cpu: 10m + memory: 100Mi + storageConfig: + alertmanagerStorageSize: 1Gi + compactStorageSize: 1Gi + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + tlsSecretMountPath: /etc/minio/certs + 
tlsSecretName: minio-tls-secret + writeStorage: + - key: ep.yaml + name: victoriametrics + receiveStorageSize: 1Gi + ruleStorageSize: 1Gi + storageClass: gp2 + storeStorageSize: 1Gi diff --git a/examples/export/v1beta2/kustomization.yaml b/examples/export/v1beta2/kustomization.yaml new file mode 100644 index 000000000..f0c8a4650 --- /dev/null +++ b/examples/export/v1beta2/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- observability.yaml diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml new file mode 100644 index 000000000..e8857c906 --- /dev/null +++ b/examples/export/v1beta2/observability.yaml @@ -0,0 +1,127 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability + annotations: +spec: + advanced: + retentionConfig: + blockDuration: 3h + cleanupInterval: 6m + deleteDelay: 50h + retentionInLocal: 5d + retentionResolution1h: 31d + retentionResolution5m: 15d + retentionResolutionRaw: 6d + observatoriumAPI: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + queryFrontend: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + query: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + compact: + resources: + limits: + cpu: 1 + memory: 2Gi + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + receive: + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + rule: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 1 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + store: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + storeMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 2 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + queryFrontendMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + memoryLimitMb: 2048 + maxItemSize: 2m + connectionLimit: 2048 + grafana: + replicas: 3 + resources: + limits: + cpu: 1 + memory: 1Gi + alertmanager: + replicas: 2 + resources: + limits: + cpu: 100m + memory: 400Mi + rbacQueryProxy: + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + interval: 30 + resources: + limits: + cpu: 200m + memory: 700Mi + requests: + cpu: 10m + memory: 100Mi + storageConfig: + alertmanagerStorageSize: 1Gi + compactStorageSize: 1Gi + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + writeStorage: + - key: ep.yaml + name: victoriametrics + receiveStorageSize: 1Gi + ruleStorageSize: 1Gi + storageClass: gp2 + storeStorageSize: 1Gi diff --git a/examples/export/victoriametrics.yaml b/examples/export/victoriametrics.yaml new file mode 100644 index 000000000..ab03f0f06 --- /dev/null +++ b/examples/export/victoriametrics.yaml @@ -0,0 +1,48 @@ +kind: Deployment +apiVersion: apps/v1 +metadata: + name: victoriametrics + namespace: open-cluster-management-observability +spec: + replicas: 1 + selector: + matchLabels: + app: victoriametrics + template: + metadata: + labels: + app: victoriametrics + spec: + containers: + - name: victoriametrics + image: >- + victoriametrics/victoria-metrics + ports: + - name: http + containerPort: 8428 + protocol: TCP + args: + - '--httpAuth.username=test' + - '--httpAuth.password=test' + volumeMounts: + - name: 
data + mountPath: /victoria-metrics-data + volumes: + - name: data + emptyDir: {} + +--- +kind: Service +apiVersion: v1 +metadata: + name: victoriametrics + namespace: open-cluster-management-observability +spec: + ports: + - name: http + protocol: TCP + port: 8428 + targetPort: http + type: ClusterIP + selector: + app: victoriametrics \ No newline at end of file diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go new file mode 100644 index 000000000..21bf71a3c --- /dev/null +++ b/tests/pkg/tests/observability_export_test.go @@ -0,0 +1,115 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package tests + +import ( + "fmt" + "os" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" + "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" +) + +var _ = Describe("Observability:", func() { + BeforeEach(func() { + hubClient = utils.NewKubeClient( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + + dynClient = utils.NewKubeClientDynamic( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext) + }) + + JustBeforeEach(func() { + Eventually(func() error { + clusters, clusterError = utils.ListManagedClusters(testOptions) + if clusterError != nil { + return clusterError + } + return nil + }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + It("RHACM4K-11170: Observability: Verify metrics would be exported to corp tools(2.5)(draft)[P2][Sev2][observability][Integration] Should have acm_remote_write_requests_total metrics with correct labels/value (export/g0)", func() { + By("Adding victoriametrics deployment/service/secret") + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/export"}) + Expect(err).ToNot(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) + + By("Updating mco cr to inject WriteStorage") + templatePath := "../../../examples/export/v1beta2" + if os.Getenv("IS_CANARY_ENV") != "true" { + templatePath = "../../../examples/export/v1beta2/custom-certs" + } + yamlB, err = kustomize.Render(kustomize.Options{KustomizationPath: templatePath}) + Expect(err).ToNot(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) + + By("Waiting for metrics acm_remote_write_requests_total on grafana console") + Eventually(func() error { + for _, cluster := range clusters { + query := fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", cluster) + err, _ := utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`}, + ) + if err != nil { + return err + } + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`, + `"code":"200`, `"name":"thanos-receiver"`}, + ) + if err != nil { + return fmt.Errorf("metrics not forwarded to thanos-receiver") + } + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`, + `"code":"204`, 
`"name":"victoriametrics"`}, + ) + if err != nil { + return fmt.Errorf("metrics not forwarded to victoriametrics") + } + } + return nil + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + }) + + JustAfterEach(func() { + Expect(utils.CleanExportResources(testOptions)).NotTo(HaveOccurred()) + Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if CurrentGinkgoTestDescription().Failed { + utils.PrintMCOObject(testOptions) + utils.PrintAllMCOPodsStatus(testOptions) + utils.PrintAllOBAPodsStatus(testOptions) + } + testFailed = testFailed || CurrentGinkgoTestDescription().Failed + }) +}) diff --git a/tests/pkg/utils/mco_export.go b/tests/pkg/utils/mco_export.go new file mode 100644 index 000000000..4a97b74ad --- /dev/null +++ b/tests/pkg/utils/mco_export.go @@ -0,0 +1,73 @@ +// Copyright (c) 2021 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package utils + +import ( + "context" + "os" + + "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" +) + +const ( + allowlistCMname = "observability-metrics-custom-allowlist" + endpointSName = "victoriametrics" + deploymentName = "victoriametrics" + svcName = "victoriametrics" +) + +func CleanExportResources(opt TestOptions) error { + hubClient := NewKubeClient( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext) + + templatePath := "../../../examples/mco/e2e/v1beta2" + if os.Getenv("IS_CANARY_ENV") != "true" { + templatePath = "../../../examples/mco/e2e/v1beta2/custom-certs" + } + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: templatePath}) + if err != nil { + return err + } + err = Apply( + opt.HubCluster.ClusterServerURL, + opt.KubeConfig, + opt.HubCluster.KubeContext, + yamlB, + ) + if err != nil { + return err + } + + err = hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE). + Delete(context.TODO(), allowlistCMname, metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return err + } + + err = hubClient.CoreV1().Secrets(MCO_NAMESPACE). + Delete(context.TODO(), endpointSName, metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return err + } + + err = hubClient.AppsV1().Deployments(MCO_NAMESPACE). + Delete(context.TODO(), deploymentName, metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return err + } + + err = hubClient.CoreV1().Services(MCO_NAMESPACE). 
+ Delete(context.TODO(), svcName, metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return err + } + + klog.V(1).Infof("Clean up/reset all export related resources") + return nil +}
From d8595ceebd9eeffcc42126aa11c682eb9efe6aa1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 22 Jul 2022 14:46:04 +0800 Subject: [PATCH 027/150] update automation according to Grafana changes in 2.6 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/results.xml | 169 ++++++++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 tests/pkg/tests/results.xml
diff --git a/tests/pkg/tests/results.xml b/tests/pkg/tests/results.xml new file mode 100644 index 000000000..586f701c9 --- /dev/null +++ b/tests/pkg/tests/results.xml @@ -0,0 +1,169 @@ [JUnit XML test-results report, 169 added lines] \ No newline at end of file
From 456d98c9051f2a6436c8adeab3ffc87abea558d1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 4 Aug 2022 15:49:08 +0800 Subject: [PATCH 028/150] remaining update Signed-off-by: Chang Liang Qu --- cicd-scripts/run-e2e-tests.sh | 2 +- tests/grafana-dev-test.sh | 2 +- tests/pkg/tests/results.xml | 119 +++++++++++++-------------- tests/pkg/utils/mco_grafana.go | 4 +- tools/README.md | 21 ++++-- tools/setup-grafana-dev.sh | 61 ++++++++++++++--- 6 files changed, 119 insertions(+), 90 deletions(-)
diff --git a/cicd-scripts/run-e2e-tests.sh b/cicd-scripts/run-e2e-tests.sh index 4cd445aa3..1b0b6b807 100755 --- a/cicd-scripts/run-e2e-tests.sh +++ b/cicd-scripts/run-e2e-tests.sh @@ -82,7 +82,7 @@ else fi go mod vendor -${GINKGO_CMD} -debug -trace ${GINKGO_FOCUS} -v ${ROOTDIR}/tests/pkg/tests -- -options=${OPTIONSFILE} -v=3 +${GINKGO_CMD} -debug -trace ${GINKGO_FOCUS} -v ${ROOTDIR}/tests/pkg/tests -- -options=${OPTIONSFILE} -v=5 cat ${ROOTDIR}/tests/pkg/tests/results.xml | grep failures=\"0\" | grep errors=\"0\" if [ $? -ne 0 ]; then
diff --git a/tests/grafana-dev-test.sh b/tests/grafana-dev-test.sh index 2b2abffef..c5f1c4e53 100755 --- a/tests/grafana-dev-test.sh +++ b/tests/grafana-dev-test.sh @@ -21,7 +21,7 @@ fi n=0 until [ "$n" -ge 30 ] do - kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev | grep "2/2" | grep "Running" && break + kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev | grep "3/3" | grep "Running" && break n=$((n+1)) echo "Retrying in 10s for waiting for grafana-dev pod ready ..." sleep 10
diff --git a/tests/pkg/tests/results.xml b/tests/pkg/tests/results.xml index 586f701c9..8649834f8 100644 --- a/tests/pkg/tests/results.xml +++ b/tests/pkg/tests/results.xml [JUnit XML test-results data; recorded failure: /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:76 Timed out after 600.001s. Expected success, but got an error: <*errors.errorString | 0xc0002e8090>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:102 STEP: Adding custom metrics allowlist configmap STEP: Waiting for new added metrics on grafana console] \ No newline at end of file
diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index aea3d1d1b..3dd550c0f 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -4,11 +4,11 @@ package utils func GetGrafanaURL(opt TestOptions) string { - grafanaConsoleURL := "https://multicloud-console.apps." + opt.HubCluster.BaseDomain + "/grafana/" + grafanaConsoleURL := "https://grafana-open-cluster-management-observability.apps." + opt.HubCluster.BaseDomain if opt.HubCluster.GrafanaURL != "" { grafanaConsoleURL = opt.HubCluster.GrafanaURL } else { - opt.HubCluster.GrafanaHost = "multicloud-console.apps." + opt.HubCluster.BaseDomain + opt.HubCluster.GrafanaHost = "grafana-open-cluster-management-observability.apps." + opt.HubCluster.BaseDomain } return grafanaConsoleURL }
diff --git a/tools/README.md b/tools/README.md index e2633dfd7..78d6c8219 100644 --- a/tools/README.md +++ b/tools/README.md @@ -13,12 +13,21 @@ $ ./setup-grafana-dev.sh --deploy secret/grafana-dev-config created deployment.apps/grafana-dev created service/grafana-dev created -ingress.extensions/grafana-dev created +serviceaccount/grafana-dev created +clusterrolebinding.rbac.authorization.k8s.io/open-cluster-management:grafana-crb-dev created +route.route.openshift.io/grafana-dev created +persistentvolumeclaim/grafana-dev created +oauthclient.oauth.openshift.io/grafana-proxy-client-dev created +deployment.apps/grafana-dev patched +service/grafana-dev patched +route.route.openshift.io/grafana-dev patched +oauthclient.oauth.openshift.io/grafana-proxy-client-dev patched +clusterrolebinding.rbac.authorization.k8s.io/open-cluster-management:grafana-crb-dev patched ``` ## Switch user to be grafana admin -Secondly, you need to ask a user to login `https://$ACM_URL/grafana-dev/` before use this script `switch-to-grafana-admin.sh` to switch the user to be a grafana admin. +Secondly, you need to ask a user to login grafana-dev host before use this script `switch-to-grafana-admin.sh` to switch the user to be a grafana admin. ``` $ ./switch-to-grafana-admin.sh kube:admin @@ -83,6 +92,10 @@ You can use the following command to uninstall your grafana instance.
$ ./setup-grafana-dev.sh --clean secret "grafana-dev-config" deleted deployment.apps "grafana-dev" deleted -service "grafana-dev" deleted -ingress.extensions "grafana-dev" deleted +Error from server (NotFound): services "grafana-dev" not found +serviceaccount "grafana-dev" deleted +route.route.openshift.io "grafana-dev" deleted +persistentvolumeclaim "grafana-dev" deleted +oauthclient.oauth.openshift.io "grafana-proxy-client-dev" deleted +clusterrolebinding.rbac.authorization.k8s.io "open-cluster-management:grafana-crb-dev" deleted ``` diff --git a/tools/setup-grafana-dev.sh b/tools/setup-grafana-dev.sh index 70ad3c9f1..f7aa316fa 100755 --- a/tools/setup-grafana-dev.sh +++ b/tools/setup-grafana-dev.sh @@ -58,6 +58,11 @@ deploy() { if [[ ${GROUP_ID} == "grafana" ]]; then GROUP_ID=472 fi + $sed_command "s~serviceAccount:.*$~serviceAccount: grafana-dev~g" grafana-dev-deploy.yaml + $sed_command "s~serviceAccountName:.*$~serviceAccountName: grafana-dev~g" grafana-dev-deploy.yaml + $sed_command "s~secretName: grafana-tls$~secretName: grafana-tls-dev~g" grafana-dev-deploy.yaml + $sed_command "s~--client-id=.*$~--client-id=grafana-proxy-client-dev~g" grafana-dev-deploy.yaml + $sed_command "s~--client-secret=.*$~--client-secret=grafana-proxy-client-dev~g" grafana-dev-deploy.yaml $sed_command "s~ securityContext:.*$~ securityContext: {fsGroup: ${GROUP_ID}}~g" grafana-dev-deploy.yaml sed "s~- emptyDir: {}$~- persistentVolumeClaim:$ claimName: grafana-dev~g" grafana-dev-deploy.yaml > grafana-dev-deploy.yaml.bak tr $ '\n' < grafana-dev-deploy.yaml.bak > grafana-dev-deploy.yaml @@ -74,17 +79,39 @@ deploy() { # For OCP 4.7, we should remove clusterIPs filed and IPs $sed_command "s~clusterIPs:.*$~ ~g" grafana-dev-svc.yaml $sed_command 's/\- [0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}//g' grafana-dev-svc.yaml + $sed_command "s~service.alpha.openshift.io/serving-cert-secret-name:.*$~service.alpha.openshift.io/serving-cert-secret-name: grafana-tls-dev~g" grafana-dev-svc.yaml + $sed_command "s~service.alpha.openshift.io/serving-cert-signed-by:.*$~~g" grafana-dev-svc.yaml + $sed_command "s~service.beta.openshift.io/serving-cert-signed-by:.*$~~g" grafana-dev-svc.yaml kubectl apply -f grafana-dev-svc.yaml - kubectl get ingress -n "$obs_namespace" grafana -o yaml > grafana-dev-ingress.yaml + + kubectl get sa -n "$obs_namespace" grafana -o yaml > grafana-dev-sa.yaml + if [ $? -ne 0 ]; then + echo "Failed to get grafana serviceaccount" + exit 1 + fi + $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-sa.yaml + $sed_command 's/{"kind":"Route","name":"grafana"}/{"kind":"Route","name":"grafana-dev"}/g' grafana-dev-sa.yaml + kubectl apply -f grafana-dev-sa.yaml + + kubectl get clusterrolebinding open-cluster-management:grafana-crb -o yaml > grafana-dev-crb.yaml + if [ $? -ne 0 ]; then + echo "Failed to get grafana cluster role binding" + exit 1 + fi + $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-crb.yaml + $sed_command "s~name: open-cluster-management:grafana-crb$~name: open-cluster-management:grafana-crb-dev~g" grafana-dev-crb.yaml + cat grafana-dev-crb.yaml + kubectl apply -f grafana-dev-crb.yaml + + kubectl get route -n "$obs_namespace" grafana -o yaml > grafana-dev-route.yaml if [ $? 
-ne 0 ]; then - echo "Failed to get grafana ingress" + echo "Failed to get grafana route" exit 1 fi - $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-ingress.yaml - $sed_command "s~serviceName: grafana$~serviceName: grafana-dev~g" grafana-dev-ingress.yaml - $sed_command "s~path: /grafana$~path: /grafana-dev~g" grafana-dev-ingress.yaml - kubectl apply -f grafana-dev-ingress.yaml + $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-route.yaml + $sed_command "s~host:.*$~~g" grafana-dev-route.yaml + kubectl apply -f grafana-dev-route.yaml cat >grafana-pvc.yaml < grafana-dev-oauthclient.yaml + if [ $? -ne 0 ]; then + echo "Failed to get grafana oauthclient" + exit 1 + fi + $sed_command "s~name: grafana-proxy-client$~name: grafana-proxy-client-dev~g" grafana-dev-oauthclient.yaml + $sed_command "s/https:\/\/grafana-/https:\/\/grafana-dev-/g" grafana-dev-oauthclient.yaml + $sed_command "s~secret: .*$~secret: grafana-proxy-client-dev~g" grafana-dev-oauthclient.yaml + kubectl apply -f grafana-dev-oauthclient.yaml + # clean all tmp files + rm -rf grafana-dev-deploy.yaml* grafana-dev-svc.yaml* grafana-dev-sa.yaml* grafana-dev-route.yaml* grafana-dev-crb.yaml* grafana-dev-oauthclient.yaml* grafana-dev-config.ini* grafana-pvc.yaml* # delete ownerReferences kubectl -n "$obs_namespace" patch deployment grafana-dev -p '{"metadata": {"ownerReferences":null}}' kubectl -n "$obs_namespace" patch svc grafana-dev -p '{"metadata": {"ownerReferences":null}}' - kubectl -n "$obs_namespace" patch ingress grafana-dev -p '{"metadata": {"ownerReferences":null}}' + kubectl -n "$obs_namespace" patch route grafana-dev -p '{"metadata": {"ownerReferences":null}}' + kubectl patch oauthclient grafana-proxy-client-dev -p '{"metadata": {"ownerReferences":null}}' + kubectl patch clusterrolebinding open-cluster-management:grafana-crb-dev -p '{"metadata": {"ownerReferences":null}}' } clean() { kubectl delete secret -n "$obs_namespace" grafana-dev-config kubectl delete deployment -n "$obs_namespace" grafana-dev kubectl delete svc -n "$obs_namespace" grafana-dev - kubectl delete ingress -n "$obs_namespace" grafana-dev + kubectl delete sa -n "$obs_namespace" grafana-dev + kubectl delete route -n "$obs_namespace" grafana-dev kubectl delete pvc -n "$obs_namespace" grafana-dev + kubectl delete oauthclient grafana-proxy-client-dev + kubectl delete clusterrolebinding open-cluster-management:grafana-crb-dev } msg() { From 44f10493841c5c6e5c9858772087da1f1186dbb1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 11 Aug 2022 13:53:55 +0800 Subject: [PATCH 029/150] update code for export metrics out of ACM Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_export_test.go | 58 ++++++++++---------- tests/pkg/utils/mco_metric.go | 1 + 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index 21bf71a3c..433ab8f09 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -14,7 +14,7 @@ import ( "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) -var _ = Describe("Observability:", func() { +var _ = Describe("", func() { BeforeEach(func() { hubClient = utils.NewKubeClient( testOptions.HubCluster.ClusterServerURL, @@ -66,35 +66,35 @@ var _ = Describe("Observability:", func() { By("Waiting for metrics acm_remote_write_requests_total on grafana console") Eventually(func() error { - for _, cluster := range clusters { - query 
:= fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", cluster) - err, _ := utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`}, - ) - if err != nil { - return err - } - err, _ = utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`, - `"code":"200`, `"name":"thanos-receiver"`}, - ) - if err != nil { - return fmt.Errorf("metrics not forwarded to thanos-receiver") - } - err, _ = utils.ContainManagedClusterMetric( - testOptions, - query, - []string{`"__name__":"acm_remote_write_requests_total"`, - `"code":"204`, `"name":"victoriametrics"`}, - ) - if err != nil { - return fmt.Errorf("metrics not forwarded to victoriametrics") - } + //for _, cluster := range clusters { + query := fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", "local-cluster") + err, _ := utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`}, + ) + if err != nil { + return err } + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`, + `"code":"200`, `"name":"thanos-receiver"`}, + ) + if err != nil { + return fmt.Errorf("metrics not forwarded to thanos-receiver") + } + err, _ = utils.ContainManagedClusterMetric( + testOptions, + query, + []string{`"__name__":"acm_remote_write_requests_total"`, + `"code":"204`, `"name":"victoriametrics"`}, + ) + if err != nil { + return fmt.Errorf("metrics not forwarded to victoriametrics") + } + //} return nil }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) diff --git a/tests/pkg/utils/mco_metric.go b/tests/pkg/utils/mco_metric.go index 504039aa1..851a64065 100644 --- a/tests/pkg/utils/mco_metric.go +++ b/tests/pkg/utils/mco_metric.go @@ -78,6 +78,7 @@ func ContainManagedClusterMetric(opt TestOptions, query string, matchedLabels [] contained := true for _, label := range matchedLabels { + klog.V(5).Infof("label is: %s\n", label) if !strings.Contains(string(metricResult), label) { contained = false break From d0957ee6f09ec66e763ed7befdcef9b7f64dfe64 Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Sat, 27 Aug 2022 06:34:37 +0800 Subject: [PATCH 030/150] update docker image as Vincent request --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 735c80ee7..3f5111c9f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,7 +1,7 @@ pipeline { agent { docker { - image 'quay.io/rhn_support_abutt/ginkgo_1_14_2-linux-go' + image 'quay.io/vboulos/acmqe-automation/ginkgo_1_14_2-linux-go' args '--network host -u 0:0' } } From 6742ead243a79f20a8bc975d697a52323fe94c00 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sat, 27 Aug 2022 18:29:39 +0800 Subject: [PATCH 031/150] only check local-cluster addon Signed-off-by: Chang Liang Qu --- .../tests/observability_deployment_test.go | 8 +++--- tests/pkg/utils/mco_oba.go | 25 +++++++++++++++++++ 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 80c6b578c..5f3a6b5c0 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -38,7 +38,7 @@ var _ = Describe("", func() { testFailed = false return nil }, EventuallyTimeoutMinute*25, EventuallyIntervalSecond*10).Should(Succeed()) - + By("Check 
clustermanagementaddon CR is created") Eventually(func() error { _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()).Get(context.TODO(), "observability-controller", metav1.GetOptions{}) @@ -52,10 +52,10 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { - err = utils.CheckAllOBAsEnabled(testOptions) + err = utils.CheckAllOBAsEnabledLocal(testOptions) if err != nil { testFailed = true return err @@ -76,4 +76,4 @@ var _ = Describe("", func() { testFailed = testFailed || CurrentGinkgoTestDescription().Failed }) -}) \ No newline at end of file +}) diff --git a/tests/pkg/utils/mco_oba.go b/tests/pkg/utils/mco_oba.go index 3a0dfed34..45f5887d6 100644 --- a/tests/pkg/utils/mco_oba.go +++ b/tests/pkg/utils/mco_oba.go @@ -89,6 +89,31 @@ func CheckAllOBAsEnabled(opt TestOptions) error { return nil } +func CheckAllOBAsEnabledLocal(opt TestOptions) error { + clusters, err := ListManagedClusters(opt) + if err != nil { + return err + } + klog.V(1).Infof("Have the following managedclusters: <%v>", clusters) + + for _, cluster := range clusters { + if cluster == `local-cluster` { + klog.V(1).Infof("Check OBA status for cluster <%v>", cluster) + err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + if err != nil { + return err + } + + klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) + err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + if err != nil { + return err + } + } + } + return nil +} + func CheckAllOBADisabled(opt TestOptions) error { clusters, err := ListManagedClusters(opt) if err != nil { From aadfe5546f172e483dd1300cac26f9cd219581aa Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 26 Sep 2022 16:17:50 +0800 Subject: [PATCH 032/150] raise pv size and add sleep for metrics export Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_export_test.go | 5 + .../pkg/tests/observability_reconcile_test.go | 2 +- tests/pkg/tests/results.xml | 127 +++++++++++------- 3 files changed, 82 insertions(+), 52 deletions(-) diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index 433ab8f09..fffbde32a 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -6,6 +6,7 @@ package tests import ( "fmt" "os" + "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -67,6 +68,8 @@ var _ = Describe("", func() { By("Waiting for metrics acm_remote_write_requests_total on grafana console") Eventually(func() error { //for _, cluster := range clusters { + // wait for pod restarting + time.Sleep(60 * time.Second) query := fmt.Sprintf("acm_remote_write_requests_total{cluster=\"%s\"} offset 1m", "local-cluster") err, _ := utils.ContainManagedClusterMetric( testOptions, @@ -100,7 +103,9 @@ var _ = Describe("", func() { }) JustAfterEach(func() { + Expect(utils.CleanExportResources(testOptions)).NotTo(HaveOccurred()) + time.Sleep(120 * time.Second) Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) }) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 892d4905b..4c72ef761 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -136,7 +136,7 @@ var _ = Describe("", func() { Expect(len(alertmans.Items)).NotTo(Equal(0)) Eventually(func() error { - err := utils.CheckStorageResize(testOptions, (*alertmans).Items[0].Name, "2Gi") + err := utils.CheckStorageResize(testOptions, (*alertmans).Items[0].Name, "3Gi") if err != nil { return err } diff --git a/tests/pkg/tests/results.xml b/tests/pkg/tests/results.xml index 8649834f8..642d4bf10 100644 --- a/tests/pkg/tests/results.xml +++ b/tests/pkg/tests/results.xml @@ -1,123 +1,126 @@ - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:76 Timed out after 600.001s. Expected success, but got an error: <*errors.errorString | 0xc0002e8090>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:102 - �[1mSTEP�[0m: Adding custom metrics allowlist configmap �[1mSTEP�[0m: Waiting for new added metrics on grafana console + + - - - - + + + + - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + @@ -129,16 +132,38 @@ - + - + - + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file From 645b4faa150e7665821ae5652e74a7e1eeb15eea Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 31 Oct 2022 14:27:31 +0800 Subject: [PATCH 033/150] raise pvc size and fix resize issue Signed-off-by: Chang Liang Qu --- .../observability-v1beta1-to-v1beta2-golden.yaml | 10 +++++----- examples/mco/e2e/v1beta1/observability.yaml | 2 +- .../mco/e2e/v1beta2/custom-certs/observability.yaml | 10 +++++----- examples/mco/e2e/v1beta2/observability.yaml | 10 +++++----- tests/pkg/utils/mco_deploy.go | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml index d669314f3..74cde6fa6 100644 --- a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml +++ b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml @@ -17,12 +17,12 @@ spec: enableMetrics: true interval: 300 storageConfig: - alertmanagerStorageSize: 1Gi - compactStorageSize: 1Gi + alertmanagerStorageSize: 2Gi + compactStorageSize: 2Gi metricObjectStorage: key: thanos.yaml name: thanos-object-storage - receiveStorageSize: 1Gi - ruleStorageSize: 1Gi + receiveStorageSize: 2Gi + ruleStorageSize: 2Gi storageClass: gp2 - storeStorageSize: 1Gi + 
storeStorageSize: 2Gi diff --git a/examples/mco/e2e/v1beta1/observability.yaml b/examples/mco/e2e/v1beta1/observability.yaml index ee59f4ce4..6f77f74ca 100644 --- a/examples/mco/e2e/v1beta1/observability.yaml +++ b/examples/mco/e2e/v1beta1/observability.yaml @@ -14,5 +14,5 @@ spec: metricObjectStorage: key: thanos.yaml name: thanos-object-storage - statefulSetSize: 1Gi + statefulSetSize: 2Gi statefulSetStorageClass: gp2 diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index 6137a66b6..a1f1f3951 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -113,14 +113,14 @@ spec: cpu: 10m memory: 100Mi storageConfig: - alertmanagerStorageSize: 1Gi - compactStorageSize: 1Gi + alertmanagerStorageSize: 2Gi + compactStorageSize: 2Gi metricObjectStorage: key: thanos.yaml name: thanos-object-storage tlsSecretMountPath: /etc/minio/certs tlsSecretName: minio-tls-secret - receiveStorageSize: 1Gi - ruleStorageSize: 1Gi + receiveStorageSize: 2Gi + ruleStorageSize: 2Gi storageClass: gp2 - storeStorageSize: 1Gi + storeStorageSize: 2Gi diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index ccac6d2bc..13588d5b4 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -113,12 +113,12 @@ spec: cpu: 10m memory: 100Mi storageConfig: - alertmanagerStorageSize: 1Gi - compactStorageSize: 1Gi + alertmanagerStorageSize: 2Gi + compactStorageSize: 2Gi metricObjectStorage: key: thanos.yaml name: thanos-object-storage - receiveStorageSize: 1Gi - ruleStorageSize: 1Gi + receiveStorageSize: 2Gi + ruleStorageSize: 2Gi storageClass: gp2 - storeStorageSize: 1Gi + storeStorageSize: 2Gi diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index 4b66e3f27..ecbb47f21 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -472,7 +472,7 @@ func ModifyMCOCR(opt TestOptions) error { } spec := mco.Object["spec"].(map[string]interface{}) storageConfig := spec["storageConfig"].(map[string]interface{}) - storageConfig["alertmanagerStorageSize"] = "2Gi" + storageConfig["alertmanagerStorageSize"] = "3Gi" advRetentionCon, _ := CheckAdvRetentionConfig(opt) if advRetentionCon { From 1b209ff4f10d8fda10994acd454f5cff67e13b08 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 2 Nov 2022 16:47:14 +0800 Subject: [PATCH 034/150] update jenkins file to delete job after 30 days Signed-off-by: Chang Liang Qu --- Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 3f5111c9f..1a83c5a70 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,4 +1,7 @@ pipeline { + options { + buildDiscarder(logRotator(daysToKeepStr: '30')) + } agent { docker { image 'quay.io/vboulos/acmqe-automation/ginkgo_1_14_2-linux-go' From d462eab40b7a57cafff0a40f64669a846ec9f10d Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 26 Dec 2022 17:16:14 +0800 Subject: [PATCH 035/150] consider uwl collector Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 2d1172b8c..c8d305307 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -141,7 +141,7 @@ var _ = Describe("", func() { 
By("Waiting for MCO addon components ready") Eventually(func() bool { err, podList := utils.GetPodList(testOptions, false, MCO_ADDON_NAMESPACE, "component=metrics-collector") - if len(podList.Items) == 1 && err == nil { + if len(podList.Items) >= 1 && err == nil { return true } return false @@ -204,7 +204,7 @@ var _ = Describe("", func() { MCO_ADDON_NAMESPACE, "component=metrics-collector", ) - if len(podList.Items) == 1 && err == nil { + if len(podList.Items) >= 1 && err == nil { return true } return false From f96e4f9cca000c61d6d9e03b7b9c9ed6af799f1f Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 5 Jan 2023 15:23:51 +0800 Subject: [PATCH 036/150] fix alert forward failure Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index c57fbd669..582065a3b 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -360,7 +360,7 @@ var _ = Describe("", func() { for _, alt := range postableAlerts { if alt.Labels != nil { labelSets := map[string]string(alt.Labels) - clusterID := labelSets["cluster"] + clusterID := labelSets["managed_cluster"] if clusterID != "" { clusterIDsInAlerts = append(clusterIDsInAlerts, clusterID) } From bb59e4e24538d7ea45f3dd35d688d44a8825c300 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 17 Jan 2023 10:02:04 +0800 Subject: [PATCH 037/150] for case 1259 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index c8d305307..4af9cd9ba 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -172,6 +172,20 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) + It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable] (deploy/g1)", func() { + + Eventually(func() error { + return utils.UpdateObservabilityFromManagedCluster(testOptions, false) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + + klog.V(1).Infof("managedcluster number is <%d>", len(testOptions.ManagedClusters)) + if len(testOptions.ManagedClusters) >= 1 { + Eventually(func() bool { + return true + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + } + }) + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { From 628b1c72ea3eae30ec38634f715ed47b78bd174a Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Fri, 3 Feb 2023 13:43:56 +0800 Subject: [PATCH 038/150] echo CLOUD_PROVIDER --- Jenkinsfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 1a83c5a70..bb4793bca 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,6 +50,8 @@ pipeline { export REGION="${params.REGION}" export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" + echo "${params.CLOUD_PROVIDER}" + echo $CLOUD_PROVIDER if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" From 
2f2aa0d6d8985a291d0d0ab432c8daab71f86476 Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Fri, 3 Feb 2023 13:49:34 +0800 Subject: [PATCH 039/150] update for cloud provider --- Jenkinsfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index bb4793bca..ab0603d23 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,7 +50,6 @@ pipeline { export REGION="${params.REGION}" export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" - echo "${params.CLOUD_PROVIDER}" echo $CLOUD_PROVIDER if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then From f28aef08d5652abb2c89ac958aa5c49e7bfd816b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Feb 2023 15:26:45 +0800 Subject: [PATCH 040/150] skip cases are not supported on the VMware Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_reconcile_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 4c72ef761..ccffe5ad1 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -6,6 +6,8 @@ package tests import ( "context" "fmt" + "os" + "strings" "time" . "github.com/onsi/ginkgo" @@ -129,6 +131,9 @@ var _ = Describe("", func() { }) It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + Skip("Skip the case due to it's not supported on the VMWARE") + } By("Resizing alertmanager storage") alertmans, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: ALERTMANAGER_LABEL, @@ -145,6 +150,9 @@ var _ = Describe("", func() { }) It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + Skip("Skip the case due to it's not supported on the VMWARE") + } advRetentionCon, err := utils.CheckAdvRetentionConfig(testOptions) if !advRetentionCon { Skip("Skip the case since " + err.Error()) From 33b88262a7ae75ddd3298771ae075bafe65a5fdf Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Fri, 3 Feb 2023 15:27:59 +0800 Subject: [PATCH 041/150] remove cloud provider --- Jenkinsfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index ab0603d23..1a83c5a70 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,7 +50,6 @@ pipeline { export REGION="${params.REGION}" export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" - echo $CLOUD_PROVIDER if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" From 7bb6e68b5034cb0fece27b9c9edd61be36425069 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Feb 2023 16:58:19 +0800 Subject: [PATCH 042/150] sync two yaml files same to avoid storage size is not allowed to update on the vmware Signed-off-by: Chang Liang Qu --- examples/export/v1beta2/observability.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index e8857c906..7eec99531 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml 
@@ -113,15 +113,15 @@ spec: cpu: 10m memory: 100Mi storageConfig: - alertmanagerStorageSize: 1Gi - compactStorageSize: 1Gi + alertmanagerStorageSize: 2Gi + compactStorageSize: 2Gi metricObjectStorage: key: thanos.yaml name: thanos-object-storage writeStorage: - key: ep.yaml name: victoriametrics - receiveStorageSize: 1Gi - ruleStorageSize: 1Gi + receiveStorageSize: 2Gi + ruleStorageSize: 2Gi storageClass: gp2 - storeStorageSize: 1Gi + storeStorageSize: 2Gi From dff6355168f0b55f668c2e393610484699e1063d Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 6 Feb 2023 11:14:53 +0800 Subject: [PATCH 043/150] update jenkins file to add CLOUD_PROVIDER Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 1a83c5a70..5bb274fbb 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -9,6 +9,7 @@ pipeline { } } parameters { + string(name:'CLOUD_PROVIDER', defaultValue: '', description: 'Cloud provider, OCP and ACM version information, like VMWARE-412-264, AWS-411') string(name:'HUB_CLUSTER_NAME', defaultValue: '', description: 'Name of Hub cluster') string(name:'BASE_DOMAIN', defaultValue: '', description: 'Base domain of Hub cluster') string(name:'OC_CLUSTER_USER', defaultValue: 'kubeadmin', description: 'OCP Hub User Name') @@ -36,6 +37,7 @@ pipeline { stage('Test Run') { steps { sh """ + export CLOUD_PROVIDER="${params.CLOUD_PROVIDER}" export OC_CLUSTER_USER="${params.OC_CLUSTER_USER}" export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" From 595e82e6fb6c5ca723af431021e5dabbac3f600d Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 8 Feb 2023 10:03:28 +0800 Subject: [PATCH 044/150] remove the skip condition in the upgrading env Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_config_test.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index a19019f85..10f34af52 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -6,7 +6,6 @@ package tests import ( "context" "fmt" - "os" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -31,9 +30,11 @@ var _ = Describe("", func() { }) It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable](config/g0)", func() { - if os.Getenv("SKIP_INSTALL_STEP") == "true" { - Skip("Skip the case due to MCO CR was created customized") - } + /* + if os.Getenv("SKIP_INSTALL_STEP") == "true" { + Skip("Skip the case due to MCO CR was created customized") + } + */ mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { @@ -45,9 +46,11 @@ var _ = Describe("", func() { }) It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable] (config/g0)", func() { - if os.Getenv("SKIP_INSTALL_STEP") == "true" { - Skip("Skip the case due to MCO CR was created customized") - } + /* + if os.Getenv("SKIP_INSTALL_STEP") == "true" { + Skip("Skip the case due to MCO CR was created customized") + } + */ mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) From eef4c3028118187c7440391ec9a75d329a85b883 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 8 Feb 2023 17:12:28 +0800 Subject: [PATCH 045/150] fix customized api error with add BearerToken Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_route_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 61b6b958f..d958e15a4 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -60,6 +60,7 @@ var _ = Describe("", func() { client := &http.Client{} if os.Getenv("IS_KIND_ENV") != "true" { client.Transport = tr + BearerToken, err = utils.FetchBearerToken(testOptions) req.Header.Set("Authorization", "Bearer "+BearerToken) } @@ -85,7 +86,7 @@ var _ = Describe("", func() { return fmt.Errorf("Failed to find metric name from response") } return nil - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) It("@BVT - [P1][Sev1][observability][Integration] Should access alert via alertmanager route (route/g0)", func() { @@ -133,6 +134,7 @@ var _ = Describe("", func() { client := &http.Client{} if os.Getenv("IS_KIND_ENV") != "true" { client.Transport = tr + BearerToken, err = utils.FetchBearerToken(testOptions) alertPostReq.Header.Set("Authorization", "Bearer "+BearerToken) } if !alertCreated { @@ -161,6 +163,7 @@ var _ = Describe("", func() { } if os.Getenv("IS_KIND_ENV") != "true" { + BearerToken, err = utils.FetchBearerToken(testOptions) alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) } From 4b806ccc8f86a0e3175c470e756e4a38568aec39 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 15 Feb 2023 15:38:05 +0800 Subject: [PATCH 046/150] automated test case for updating label into the cm Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_grafana_test.go | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index bac28b52c..1b118af09 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -4,10 +4,16 @@ package tests import ( + "context" + "encoding/json" "fmt" + "strings" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) @@ -46,6 +52,57 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable] (grafana/g1)", func() { + Eventually(func() bool { + clientDynamic := utils.GetKubeClientDynamic(testOptions, true) + objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.V(1).Infof("Get the managedcluster failed, The err is: %s\n", err) + } + + for _, obj := range objs.Items { + metadata := obj.Object["metadata"].(map[string]interface{}) + name := metadata["name"].(string) + if name == "local-cluster" { + labels := metadata["labels"].(map[string]interface{}) + labels["autolabel"] = "grafanacm" + klog.V(1).Infof("The cluster with new label: %s\n", labels) + _, updateErr := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).Update(context.TODO(), &obj, metav1.UpdateOptions{}) + if updateErr != nil { + klog.V(1).Infof("Update label failed, updateErr is : %s\n", updateErr) + } + } + + } + + var ( + errcm error + cm *v1.ConfigMap + ) + errcm, cm = utils.GetConfigMap( + testOptions, + false, + "observability-managed-cluster-label-allowlist", + MCO_NAMESPACE, + ) + if errcm != nil { + klog.V(1).Infof("The errcm is: %s\n", errcm) + } + + data, err := json.Marshal(cm) + if err != nil { + klog.V(1).Infof("The err is: %s\n", err) + } + if !strings.Contains(string(data), "autolabel") { + klog.V(1).Infof("new managedcluster label autolabel is NOT added into configmap observability-managed-cluster-label-allowlist") + return false + } else { + klog.V(1).Infof("new managedcluster label autolabel is added into configmap observability-managed-cluster-label-allowlist") + return true + } + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) + }) + JustAfterEach(func() { Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) }) From 645d589d31911fadc7f9ae9c32c14c68a292cbda Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 15 Feb 2023 15:38:05 +0800 Subject: [PATCH 047/150] automated test case for updating label into the cm Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_grafana_test.go | 57 +++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index bac28b52c..1b118af09 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -4,10 +4,16 @@ package tests import ( + "context" + "encoding/json" "fmt" + "strings" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) @@ -46,6 +52,57 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable] (grafana/g1)", func() { + Eventually(func() bool { + clientDynamic := utils.GetKubeClientDynamic(testOptions, true) + objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.V(1).Infof("Get the managedcluster failed, The err is: %s\n", err) + } + + for _, obj := range objs.Items { + metadata := obj.Object["metadata"].(map[string]interface{}) + name := metadata["name"].(string) + if name == "local-cluster" { + labels := metadata["labels"].(map[string]interface{}) + labels["autolabel"] = "grafanacm" + klog.V(1).Infof("The cluster with new label: %s\n", labels) + _, updateErr := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).Update(context.TODO(), &obj, metav1.UpdateOptions{}) + if updateErr != nil { + klog.V(1).Infof("Update label failed, updateErr is : %s\n", updateErr) + } + } + + } + + var ( + errcm error + cm *v1.ConfigMap + ) + errcm, cm = utils.GetConfigMap( + testOptions, + false, + "observability-managed-cluster-label-allowlist", + MCO_NAMESPACE, + ) + if errcm != nil { + klog.V(1).Infof("The errcm is: %s\n", errcm) + } + + data, err := json.Marshal(cm) + if err != nil { + klog.V(1).Infof("The err is: %s\n", err) + } + if !strings.Contains(string(data), "autolabel") { + klog.V(1).Infof("new managedcluster label autolabel is NOT added into configmap observability-managed-cluster-label-allowlist") + return false + } else { + klog.V(1).Infof("new managedcluster label autolabel is added into configmap observability-managed-cluster-label-allowlist") + return true + } + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) + }) + JustAfterEach(func() { Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) }) From 2e6213ea1e92fd5f077b9ee134a99ffcabf7efcf Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 20 Feb 2023 13:47:22 +0800 Subject: [PATCH 048/150] add auto case for disable alert forward Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 141 ++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 582065a3b..a03e060a8 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -378,6 +378,147 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) + It("RHACM4K-22427: Observability: Disable the managedcluster's alerts forward to the Hub [P2][Sev2][Observability][Integration] (alertforward/g1)", func() { + amURL := url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + q := amURL.Query() + q.Set("filter", "alertname=Watchdog") + amURL.RawQuery = q.Encode() + + caCrt, err := utils.GetRouterCA(hubClient) + Expect(err).NotTo(HaveOccurred()) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(caCrt) + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + }, + } + + alertGetReq, err := http.NewRequest("GET", amURL.String(), nil) + Expect(err).NotTo(HaveOccurred()) + + if os.Getenv("IS_KIND_ENV") != "true" { + if BearerToken == "" { + BearerToken, err = utils.FetchBearerToken(testOptions) + Expect(err).NotTo(HaveOccurred()) + } + alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) + } + + //expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0") + //Expect(err).NotTo(HaveOccurred()) + expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions) + Expect(err).NotTo(HaveOccurred()) + // expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) + + // install watchdog PrometheusRule to *KS clusters + watchDogRuleKustomizationPath := "../../../examples/alerts/watchdog_rule" + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: watchDogRuleKustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + for _, ks := range expectedKSClusterNames { + for idx, mc := range testOptions.ManagedClusters { + if mc.Name == ks { + err = utils.Apply( + testOptions.ManagedClusters[idx].ClusterServerURL, + testOptions.ManagedClusters[idx].KubeConfig, + testOptions.ManagedClusters[idx].KubeContext, + yamlB, + ) + Expect(err).NotTo(HaveOccurred()) + } + } + } + + By("Add annotations to disable alert forward") + mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.Errorf("err: %+v\n", err) + } + spec := mco.Object["metadata"].(map[string]interface{}) + annotations, found := spec["annotations"].(map[string]interface{}) + if !found { + annotations = make(map[string]interface{}) + } + annotations["mco-disable-alerting"] = "true" + spec["annotations"] = annotations + _, updateErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + klog.Errorf("err: %+v\n", err) + } + + By("Checking Watchdog alerts is not forwarded to the hub") + Eventually(func() bool { + resp, err := client.Do(alertGetReq) + if err != nil { + klog.Errorf("err: %+v\n", err) + return false + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + klog.Errorf("err: %+v\n", resp) + return false + } + + alertResult, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + + postableAlerts := models.PostableAlerts{} + err = json.Unmarshal(alertResult, &postableAlerts) + if err != nil { + return false + } + klog.V(3).Infof("postableAlerts is %+v", postableAlerts) + + for _, alt := range postableAlerts { + klog.V(3).Infof("alt.Labels is %s", alt.Labels) + if alt.Labels != nil { + klog.V(3).Infof("waiting alerts are disappeared?") + return false + } + } + + klog.V(3).Infof("before enable alert forward - spec is %s", spec) + mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.Errorf("err: %+v\n", err) + } + spec1 := mco.Object["metadata"].(map[string]interface{}) + delete(spec1["annotations"].(map[string]interface{}), 
"mco-disable-alerting") + _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr1 != nil { + klog.Errorf("err: %+v\n", updateErr1) + } + klog.V(3).Infof("enable alert forward - spec1 is %s", spec1) + + return true + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*60).Should(BeTrue()) + /* + By("Recover MCOCR to remove disable alert forward") + + mco, getErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr1 != nil { + klog.Errorf("err: %+v\n", getErr1) + } + spec2 := mco.Object["metadata"].(map[string]interface{}) + + klog.V(3).Infof("before enable alert forward - spec is %s", spec) + delete(spec["annotations"].(map[string]interface{}), "mco-disable-alerting") + _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr1 != nil { + klog.Errorf("err: %+v\n", updateErr1) + } + klog.V(3).Infof("enable alert forward - spec is %s", spec) + */ + }) + JustAfterEach(func() { Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) }) From a3799069145e24d2f71671ac83c48d8993024f87 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 20 Feb 2023 13:47:22 +0800 Subject: [PATCH 049/150] add auto case for disable alert forward Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 141 ++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 582065a3b..a03e060a8 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -378,6 +378,147 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) + It("RHACM4K-22427: Observability: Disable the managedcluster's alerts forward to the Hub [P2][Sev2][Observability][Integration] (alertforward/g1)", func() { + amURL := url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + q := amURL.Query() + q.Set("filter", "alertname=Watchdog") + amURL.RawQuery = q.Encode() + + caCrt, err := utils.GetRouterCA(hubClient) + Expect(err).NotTo(HaveOccurred()) + pool := x509.NewCertPool() + pool.AppendCertsFromPEM(caCrt) + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + }, + } + + alertGetReq, err := http.NewRequest("GET", amURL.String(), nil) + Expect(err).NotTo(HaveOccurred()) + + if os.Getenv("IS_KIND_ENV") != "true" { + if BearerToken == "" { + BearerToken, err = utils.FetchBearerToken(testOptions) + Expect(err).NotTo(HaveOccurred()) + } + alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) + } + + //expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0") + //Expect(err).NotTo(HaveOccurred()) + expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions) + Expect(err).NotTo(HaveOccurred()) + // expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) 
+ + // install watchdog PrometheusRule to *KS clusters + watchDogRuleKustomizationPath := "../../../examples/alerts/watchdog_rule" + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: watchDogRuleKustomizationPath}) + Expect(err).NotTo(HaveOccurred()) + for _, ks := range expectedKSClusterNames { + for idx, mc := range testOptions.ManagedClusters { + if mc.Name == ks { + err = utils.Apply( + testOptions.ManagedClusters[idx].ClusterServerURL, + testOptions.ManagedClusters[idx].KubeConfig, + testOptions.ManagedClusters[idx].KubeContext, + yamlB, + ) + Expect(err).NotTo(HaveOccurred()) + } + } + } + + By("Add annotations to disable alert forward") + mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.Errorf("err: %+v\n", err) + } + spec := mco.Object["metadata"].(map[string]interface{}) + annotations, found := spec["annotations"].(map[string]interface{}) + if !found { + annotations = make(map[string]interface{}) + } + annotations["mco-disable-alerting"] = "true" + spec["annotations"] = annotations + _, updateErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr != nil { + klog.Errorf("err: %+v\n", err) + } + + By("Checking Watchdog alerts is not forwarded to the hub") + Eventually(func() bool { + resp, err := client.Do(alertGetReq) + if err != nil { + klog.Errorf("err: %+v\n", err) + return false + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + klog.Errorf("err: %+v\n", resp) + return false + } + + alertResult, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false + } + + postableAlerts := models.PostableAlerts{} + err = json.Unmarshal(alertResult, &postableAlerts) + if err != nil { + return false + } + klog.V(3).Infof("postableAlerts is %+v", postableAlerts) + + for _, alt := range postableAlerts { + klog.V(3).Infof("alt.Labels is %s", alt.Labels) + if alt.Labels != nil { + klog.V(3).Infof("waiting alerts are disappeared?") + return false + } + } + + klog.V(3).Infof("before enable alert forward - spec is %s", spec) + mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.Errorf("err: %+v\n", err) + } + spec1 := mco.Object["metadata"].(map[string]interface{}) + delete(spec1["annotations"].(map[string]interface{}), "mco-disable-alerting") + _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr1 != nil { + klog.Errorf("err: %+v\n", updateErr1) + } + klog.V(3).Infof("enable alert forward - spec1 is %s", spec1) + + return true + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*60).Should(BeTrue()) + /* + By("Recover MCOCR to remove disable alert forward") + + mco, getErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr1 != nil { + klog.Errorf("err: %+v\n", getErr1) + } + spec2 := mco.Object["metadata"].(map[string]interface{}) + + klog.V(3).Infof("before enable alert forward - spec is %s", spec) + delete(spec["annotations"].(map[string]interface{}), "mco-disable-alerting") + _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) + if updateErr1 != nil { + klog.Errorf("err: %+v\n", updateErr1) + } + klog.V(3).Infof("enable alert forward - spec is %s", spec) + */ + }) + JustAfterEach(func() { 
Expect(utils.IntegrityChecking(testOptions)).NotTo(HaveOccurred()) }) From 9da95e90ece3a0e65209eabe43a892ec087eeab9 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 22 Feb 2023 13:05:27 +0800 Subject: [PATCH 050/150] improve disable alert case Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 47 ++++++--------------- 1 file changed, 13 insertions(+), 34 deletions(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index a03e060a8..31bebd478 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -410,13 +410,9 @@ var _ = Describe("", func() { alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) } - //expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0") - //Expect(err).NotTo(HaveOccurred()) expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions) Expect(err).NotTo(HaveOccurred()) - // expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) - // install watchdog PrometheusRule to *KS clusters watchDogRuleKustomizationPath := "../../../examples/alerts/watchdog_rule" yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: watchDogRuleKustomizationPath}) Expect(err).NotTo(HaveOccurred()) @@ -485,38 +481,21 @@ var _ = Describe("", func() { } } - klog.V(3).Infof("before enable alert forward - spec is %s", spec) - mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr != nil { - klog.Errorf("err: %+v\n", err) - } - spec1 := mco.Object["metadata"].(map[string]interface{}) - delete(spec1["annotations"].(map[string]interface{}), "mco-disable-alerting") - _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) - if updateErr1 != nil { - klog.Errorf("err: %+v\n", updateErr1) - } - klog.V(3).Infof("enable alert forward - spec1 is %s", spec1) - return true - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*60).Should(BeTrue()) - /* - By("Recover MCOCR to remove disable alert forward") - - mco, getErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr1 != nil { - klog.Errorf("err: %+v\n", getErr1) - } - spec2 := mco.Object["metadata"].(map[string]interface{}) + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*120).Should(BeTrue()) - klog.V(3).Infof("before enable alert forward - spec is %s", spec) - delete(spec["annotations"].(map[string]interface{}), "mco-disable-alerting") - _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) - if updateErr1 != nil { - klog.Errorf("err: %+v\n", updateErr1) - } - klog.V(3).Infof("enable alert forward - spec is %s", spec) - */ + klog.V(3).Infof("before enable alert forward - spec is %s", spec) + mco1, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.Errorf("err: %+v\n", err) + } + spec1 := mco1.Object["metadata"].(map[string]interface{}) + delete(spec1["annotations"].(map[string]interface{}), "mco-disable-alerting") + _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco1, metav1.UpdateOptions{}) + if updateErr1 != nil { + klog.Errorf("err: %+v\n", updateErr1) + } + klog.V(3).Infof("enable alert forward - spec1 is %s", spec1) }) 
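	// Hedged sketch (not part of this patch): the annotation toggle that the
	// disable-alert-forward test above performs inline, factored into a
	// hypothetical helper for clarity. dynClient, MCO_CR_NAME and
	// utils.NewMCOGVRV1BETA2 come from this repo's test package; the helper
	// name setAlertForwarding is illustrative only.
	func setAlertForwarding(disabled bool) error {
		mco, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).
			Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{})
		if err != nil {
			return err
		}
		annotations := mco.GetAnnotations()
		if annotations == nil {
			annotations = map[string]string{}
		}
		if disabled {
			// Presence of this annotation stops forwarding of managed-cluster
			// alerts to the hub Alertmanager.
			annotations["mco-disable-alerting"] = "true"
		} else {
			delete(annotations, "mco-disable-alerting")
		}
		mco.SetAnnotations(annotations)
		_, err = dynClient.Resource(utils.NewMCOGVRV1BETA2()).
			Update(context.TODO(), mco, metav1.UpdateOptions{})
		return err
	}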
JustAfterEach(func() { From 46ef0954fcc7f1b128fb8a949da20e1e678cb914 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 22 Feb 2023 13:05:27 +0800 Subject: [PATCH 051/150] improve disable alert case Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 47 ++++++--------------- 1 file changed, 13 insertions(+), 34 deletions(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index a03e060a8..31bebd478 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -410,13 +410,9 @@ var _ = Describe("", func() { alertGetReq.Header.Set("Authorization", "Bearer "+BearerToken) } - //expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0") - //Expect(err).NotTo(HaveOccurred()) expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions) Expect(err).NotTo(HaveOccurred()) - // expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) - // install watchdog PrometheusRule to *KS clusters watchDogRuleKustomizationPath := "../../../examples/alerts/watchdog_rule" yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: watchDogRuleKustomizationPath}) Expect(err).NotTo(HaveOccurred()) @@ -485,38 +481,21 @@ var _ = Describe("", func() { } } - klog.V(3).Infof("before enable alert forward - spec is %s", spec) - mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr != nil { - klog.Errorf("err: %+v\n", err) - } - spec1 := mco.Object["metadata"].(map[string]interface{}) - delete(spec1["annotations"].(map[string]interface{}), "mco-disable-alerting") - _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) - if updateErr1 != nil { - klog.Errorf("err: %+v\n", updateErr1) - } - klog.V(3).Infof("enable alert forward - spec1 is %s", spec1) - return true - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*60).Should(BeTrue()) - /* - By("Recover MCOCR to remove disable alert forward") - - mco, getErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) - if getErr1 != nil { - klog.Errorf("err: %+v\n", getErr1) - } - spec2 := mco.Object["metadata"].(map[string]interface{}) + }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*120).Should(BeTrue()) - klog.V(3).Infof("before enable alert forward - spec is %s", spec) - delete(spec["annotations"].(map[string]interface{}), "mco-disable-alerting") - _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco, metav1.UpdateOptions{}) - if updateErr1 != nil { - klog.Errorf("err: %+v\n", updateErr1) - } - klog.V(3).Infof("enable alert forward - spec is %s", spec) - */ + klog.V(3).Infof("before enable alert forward - spec is %s", spec) + mco1, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) + if getErr != nil { + klog.Errorf("err: %+v\n", err) + } + spec1 := mco1.Object["metadata"].(map[string]interface{}) + delete(spec1["annotations"].(map[string]interface{}), "mco-disable-alerting") + _, updateErr1 := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Update(context.TODO(), mco1, metav1.UpdateOptions{}) + if updateErr1 != nil { + klog.Errorf("err: %+v\n", updateErr1) + } + klog.V(3).Infof("enable alert forward - spec1 is %s", spec1) }) JustAfterEach(func() { From 
b268dcd1bf15f3106205d518c3d986829ed63681 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 2 Mar 2023 16:20:45 +0800 Subject: [PATCH 052/150] remove existing or previous pullsecret to avoid make new installation failed Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_deploy.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index ecbb47f21..838f87a9e 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -771,6 +771,13 @@ func CreatePullSecret(opt TestOptions, mcoNs string) error { return errGet } + mcopSecret, errGet := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), name, metav1.GetOptions{}) + if mcopSecret != nil { + errDelGet := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if errGet != nil { + klog.V(1).Infof("Delete existing pullSecret - %s", errDelGet) + } + } pullSecret.ObjectMeta = metav1.ObjectMeta{ Name: name, Namespace: MCO_NAMESPACE, From 74f07a5876168b14d6ec3f96a130ad5931845838 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 2 Mar 2023 16:20:45 +0800 Subject: [PATCH 053/150] remove existing or previous pullsecret to avoid make new installation failed Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_deploy.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index ecbb47f21..838f87a9e 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -771,6 +771,13 @@ func CreatePullSecret(opt TestOptions, mcoNs string) error { return errGet } + mcopSecret, errGet := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), name, metav1.GetOptions{}) + if mcopSecret != nil { + errDelGet := clientKube.CoreV1().Secrets(MCO_NAMESPACE).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if errGet != nil { + klog.V(1).Infof("Delete existing pullSecret - %s", errDelGet) + } + } pullSecret.ObjectMeta = metav1.ObjectMeta{ Name: name, Namespace: MCO_NAMESPACE, From 7ffcf2c55d2851729f75287a5bf7088be7968c78 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Mar 2023 08:28:52 +0800 Subject: [PATCH 054/150] skip another unsupported case on the VMware Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_reconcile_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index ccffe5ad1..597c50050 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -43,6 +43,9 @@ var _ = Describe("", func() { }) It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + Skip("Skip the case due to it's not supported on the VMWARE") + } By("Modifying MCO CR for reconciling") err := utils.ModifyMCOCR(testOptions) Expect(err).ToNot(HaveOccurred()) From 7f0df4bb6c3d6e43f6b583afc66355d7d9fc1441 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Mar 2023 08:28:52 +0800 Subject: [PATCH 055/150] skip another unsupported case on the VMware Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_reconcile_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/pkg/tests/observability_reconcile_test.go 
b/tests/pkg/tests/observability_reconcile_test.go index ccffe5ad1..597c50050 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -43,6 +43,9 @@ var _ = Describe("", func() { }) It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + Skip("Skip the case due to it's not supported on the VMWARE") + } By("Modifying MCO CR for reconciling") err := utils.ModifyMCOCR(testOptions) Expect(err).ToNot(HaveOccurred()) From 86deef3acc412dce0f6e86483c6f7e1607ad79b9 Mon Sep 17 00:00:00 2001 From: Vincent Boulos Date: Thu, 16 Mar 2023 13:29:36 -0400 Subject: [PATCH 056/150] create Dockerfile and execute command file required by Openshft CI to integrate Obs --- Dockerfile.interop | 14 ++++++++ execute_obs_interop_commands.sh | 57 +++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 Dockerfile.interop create mode 100644 execute_obs_interop_commands.sh diff --git a/Dockerfile.interop b/Dockerfile.interop new file mode 100644 index 000000000..09a9bd9bd --- /dev/null +++ b/Dockerfile.interop @@ -0,0 +1,14 @@ +# Command to build this Dockerfile +# docker build -f Dockerfile -t quay.io/vboulos/acmqe-automation/obs:obs-ginkgo_1_14_2-linux-go . + +FROM quay.io/vboulos/acmqe-automation/ginkgo_1_14_2-linux-go + +# Copy the GRC repo repo into /tmp/grc folder +RUN mkdir /tmp/obs +WORKDIR /tmp/obs +COPY . . + +# good colors for most applications +ENV TERM=xterm + +CMD ["/bin/bash"] \ No newline at end of file diff --git a/execute_obs_interop_commands.sh b/execute_obs_interop_commands.sh new file mode 100644 index 000000000..71724562b --- /dev/null +++ b/execute_obs_interop_commands.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +export PARAM_AWS_SECRET_ACCESS_KEY=${PARAM_AWS_SECRET_ACCESS_KEY:-} +export PARAM_AWS_ACCESS_KEY_ID=${PARAM_AWS_ACCESS_KEY_ID:-} +export CLOUD_PROVIDER=${CLOUD_PROVIDER:-} +export OC_CLUSTER_USER=${OC_CLUSTER_USER:-} +export OC_HUB_CLUSTER_PASS=${OC_HUB_CLUSTER_PASS:-} +export OC_HUB_CLUSTER_API_URL=${OC_HUB_CLUSTER_API_URL:-} +export HUB_CLUSTER_NAME=${HUB_CLUSTER_NAME:-} +export BASE_DOMAIN=${BASE_DOMAIN:-} +export MANAGED_CLUSTER_NAME=${MANAGED_CLUSTER_NAME:-} +export MANAGED_CLUSTER_BASE_DOMAIN=${MANAGED_CLUSTER_BASE_DOMAIN:-} +export MANAGED_CLUSTER_USER=${MANAGED_CLUSTER_USER:-} +export MANAGED_CLUSTER_PASS=${MANAGED_CLUSTER_PASS:-} +export MANAGED_CLUSTER_API_URL=${MANAGED_CLUSTER_API_URL} +export BUCKET=${BUCKET:-'obs-v1'} +export REGION=${REGION:-'us-east-1'} +export USE_MINIO=${USE_MINIO:-'false'} +export SKIP_INSTALL_STEP=${SKIP_INSTALL_STEP:-'false'} +export SKIP_UNINSTALL_STEP=${SKIP_UNINSTALL_STEP:-'true'} + +if [[ -n ${PARAM_AWS_ACCESS_KEY_ID} ]]; then + export AWS_ACCESS_KEY_ID=${PARAM_AWS_ACCESS_KEY_ID} +fi + +if [[ -n ${PARAM_AWS_SECRET_ACCESS_KEY} ]]; then + export AWS_SECRET_ACCESS_KEY=${PARAM_AWS_SECRET_ACCESS_KEY} +fi + +if [[ ${!USE_MINIO} == false ]]; then + export IS_CANARY_ENV=true +fi + +if [[ -z ${HUB_CLUSTER_NAME} || -z ${BASE_DOMAIN} || -z ${OC_CLUSTER_USER} || -z ${OC_HUB_CLUSTER_PASS} || -z ${OC_HUB_CLUSTER_API_URL} ]]; then + echo "Aborting test.. 
OCP HUB details are required for the test execution" + exit 1 +else + if [[ -n ${MANAGED_CLUSTER_USER} && -n ${MANAGED_CLUSTER_PASS} && -n ${MANAGED_CLUSTER_API_URL} ]]; then + oc login --insecure-skip-tls-verify -u $MANAGED_CLUSTER_USER -p $MANAGED_CLUSTER_PASS $MANAGED_CLUSTER_API_URL + oc config view --minify --raw=true > ~/.kube/managed_kubeconfig + export MAKUBECONFIG=~/.kube/managed_kubeconfig + fi + set +x + oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL + set -x + export KUBECONFIG=~/.kube/config + go mod vendor && ginkgo build ./tests/pkg/tests/ + cd tests + cp resources/options.yaml.template resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.name="'"$HUB_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.baseDomain="'"$BASE_DOMAIN"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.name="'"$MANAGED_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.baseDomain="'"$MANAGED_CLUSTER_BASE_DOMAIN"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"$MAKUBECONFIG"'"' resources/options.yaml + cat resources/options.yaml + ginkgo -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 +fi \ No newline at end of file From ad9089c1ff0779bb1a2598b88175b5b3fb14b51b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 7 Apr 2023 16:14:01 +0800 Subject: [PATCH 057/150] new automation case for 30645 Signed-off-by: Chang Liang Qu --- .../cluster-monitoring-config/README.md | 3 +++ .../cluster-monitoring-config.yaml | 9 +++++++++ .../kustomization.yaml | 4 ++++ .../tests/observability_deployment_test.go | 19 +++++++++++++++++++ tests/pkg/tests/observability_install_test.go | 12 ++++++++++++ 5 files changed, 47 insertions(+) create mode 100644 examples/configmapcmc/cluster-monitoring-config/README.md create mode 100644 examples/configmapcmc/cluster-monitoring-config/cluster-monitoring-config.yaml create mode 100644 examples/configmapcmc/cluster-monitoring-config/kustomization.yaml diff --git a/examples/configmapcmc/cluster-monitoring-config/README.md b/examples/configmapcmc/cluster-monitoring-config/README.md new file mode 100644 index 000000000..6dfd856e0 --- /dev/null +++ b/examples/configmapcmc/cluster-monitoring-config/README.md @@ -0,0 +1,3 @@ +# ConfigMap cluster-monitoring-config + + diff --git a/examples/configmapcmc/cluster-monitoring-config/cluster-monitoring-config.yaml b/examples/configmapcmc/cluster-monitoring-config/cluster-monitoring-config.yaml new file mode 100644 index 000000000..4e3e7dee9 --- /dev/null +++ b/examples/configmapcmc/cluster-monitoring-config/cluster-monitoring-config.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: cluster-monitoring-config + namespace: openshift-monitoring +data: + config.yaml: | + alertmanagerMain: + enableUserAlertmanagerConfig: true \ No newline at end of file diff --git a/examples/configmapcmc/cluster-monitoring-config/kustomization.yaml b/examples/configmapcmc/cluster-monitoring-config/kustomization.yaml new file mode 100644 index 000000000..a4a8a1fc2 --- /dev/null +++ b/examples/configmapcmc/cluster-monitoring-config/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- cluster-monitoring-config.yaml diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 5f3a6b5c0..d4aff3750 100644 --- 
a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -5,6 +5,8 @@ package tests import ( "context" + "os" + "strings" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -66,6 +68,23 @@ var _ = Describe("", func() { }) + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable] (deployment/g1)", func() { + By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") + if os.Getenv("SKIP_INSTALL_STEP") == "true" { + Skip("Skip the case due to this case is only available before MCOCR deployment") + } + Eventually(func() bool { + + cm, err := hubClient.CoreV1().ConfigMaps("openshift-monitoring").Get(context.TODO(), "cluster-monitoring-config", metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + if strings.Contains(cm.String(), "enableUserAlertmanagerConfig: true") { + return true + } + return false + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) + }) + AfterEach(func() { if CurrentGinkgoTestDescription().Failed { utils.PrintMCOObject(testOptions) diff --git a/tests/pkg/tests/observability_install_test.go b/tests/pkg/tests/observability_install_test.go index 263cb432e..bf69d2b2c 100644 --- a/tests/pkg/tests/observability_install_test.go +++ b/tests/pkg/tests/observability_install_test.go @@ -33,6 +33,18 @@ func installMCO() { testOptions.KubeConfig, testOptions.HubCluster.KubeContext) + By("Deploy CM cluster-monitoring-config") + + yamlBc, _ := kustomize.Render( + kustomize.Options{KustomizationPath: "../../../examples/configmapcmc/cluster-monitoring-config"}, + ) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlBc)).NotTo(HaveOccurred()) + By("Checking MCO operator is started up and running") podList, err := hubClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: MCO_LABEL}) Expect(len(podList.Items)).To(Equal(1)) From 950a04da0ec45daa3035395072566b1c01922f50 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 11 Apr 2023 14:24:19 +0800 Subject: [PATCH 058/150] auto test case 31474 and 31475 Signed-off-by: Chang Liang Qu --- examples/maxitemsize/updatemcocr/README.md | 3 + .../updatemcocr/kustomization.yaml | 4 + .../v1beta2-observability-maxitemsize.yaml | 124 ++++++++++++++++++ tests/pkg/tests/observability_config_test.go | 81 ++++++++++++ 4 files changed, 212 insertions(+) create mode 100644 examples/maxitemsize/updatemcocr/README.md create mode 100644 examples/maxitemsize/updatemcocr/kustomization.yaml create mode 100644 examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml diff --git a/examples/maxitemsize/updatemcocr/README.md b/examples/maxitemsize/updatemcocr/README.md new file mode 100644 index 000000000..6dfd856e0 --- /dev/null +++ b/examples/maxitemsize/updatemcocr/README.md @@ -0,0 +1,3 @@ +# ConfigMap cluster-monitoring-config + + diff --git a/examples/maxitemsize/updatemcocr/kustomization.yaml b/examples/maxitemsize/updatemcocr/kustomization.yaml new file mode 100644 index 000000000..4d25caf93 --- /dev/null +++ b/examples/maxitemsize/updatemcocr/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- v1beta2-observability-maxitemsize.yaml diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml 
b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml new file mode 100644 index 000000000..7f6e636ec --- /dev/null +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -0,0 +1,124 @@ +apiVersion: observability.open-cluster-management.io/v1beta2 +kind: MultiClusterObservability +metadata: + name: observability + annotations: +spec: + advanced: + retentionConfig: + blockDuration: 3h + cleanupInterval: 6m + deleteDelay: 50h + retentionInLocal: 5d + retentionResolution1h: 31d + retentionResolution5m: 15d + retentionResolutionRaw: 6d + observatoriumAPI: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + queryFrontend: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + query: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + compact: + resources: + limits: + cpu: 1 + memory: 2Gi + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + receive: + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + rule: + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 1 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + store: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + serviceAccountAnnotations: + test.com/role-arn: 's3_role' + storeMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 2 + memoryLimitMb: 4096 + maxItemSize: 10m + connectionLimit: 2048 + queryFrontendMemcached: + resources: + limits: + cpu: 1 + memory: 2Gi + replicas: 3 + memoryLimitMb: 4096 + maxItemSize: 10m + connectionLimit: 2048 + grafana: + replicas: 3 + resources: + limits: + cpu: 1 + memory: 1Gi + alertmanager: + replicas: 2 + resources: + limits: + cpu: 100m + memory: 400Mi + rbacQueryProxy: + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi + nodeSelector: + kubernetes.io/os: linux + observabilityAddonSpec: + enableMetrics: true + interval: 30 + resources: + limits: + cpu: 200m + memory: 700Mi + requests: + cpu: 10m + memory: 100Mi + storageConfig: + alertmanagerStorageSize: 2Gi + compactStorageSize: 2Gi + metricObjectStorage: + key: thanos.yaml + name: thanos-object-storage + receiveStorageSize: 2Gi + ruleStorageSize: 2Gi + storageClass: gp2 + storeStorageSize: 2Gi diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 10f34af52..e6c08c5f9 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -6,6 +6,8 @@ package tests import ( "context" "fmt" + "strings" + "time" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -13,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" + "github.com/stolostron/multicluster-observability-operator/tests/pkg/kustomize" "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) @@ -29,6 +32,84 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable](config/g1)", func() { + + By("Updating mco cr to update values in storeMemcached") + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) + Expect(err).ToNot(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) + + time.Sleep(60 * time.Second) + + By("Check the value is effect in the sts observability-thanos-store-shard-0") + Eventually(func() bool { + + thanosStoreMemSts, _ := utils.GetStatefulSet(testOptions, true, "observability-thanos-store-memcached", MCO_NAMESPACE) + //klog.V(3).Infof("STS thanosStoreSts is %s", thanosStoreMemSts) + containers := thanosStoreMemSts.Spec.Template.Spec.Containers + + args := containers[0].Args + //klog.V(3).Infof("args is %s", args) + + argsStr := strings.Join(args, " ") + //klog.V(3).Infof("argsStr is %s", argsStr) + + if !strings.Contains(argsStr, "-I 10m") { + klog.V(3).Infof("maxItemSize is not effect in sts observability-thanos-store-memcached") + return false + } + + klog.V(3).Infof("maxItemSize is effect in sts observability-thanos-store-memcached") + return true + + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) + }) + + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable](config/g1)", func() { + + By("Updating mco cr to update values in storeMemcached") + yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) + Expect(err).ToNot(HaveOccurred()) + Expect( + utils.Apply( + testOptions.HubCluster.ClusterServerURL, + testOptions.KubeConfig, + testOptions.HubCluster.KubeContext, + yamlB, + )).NotTo(HaveOccurred()) + + time.Sleep(60 * time.Second) + + By("Check the value is effect in the sts observability-thanos-store-shard-0") + Eventually(func() bool { + + thanosQueFronMemSts, _ := utils.GetStatefulSet(testOptions, true, "observability-thanos-query-frontend-memcached", MCO_NAMESPACE) + //klog.V(3).Infof("STS thanosStoreSts is %s", thanosQueFronMemSts) + containers := thanosQueFronMemSts.Spec.Template.Spec.Containers + + args := containers[0].Args + //klog.V(3).Infof("args is %s", args) + + argsStr := strings.Join(args, " ") + //klog.V(3).Infof("argsStr is %s", argsStr) + + if !strings.Contains(argsStr, "-I 10m") { + klog.V(3).Infof("maxItemSize is not effect in sts observability-thanos-query-frontend-memcached") + return false + } + + klog.V(3).Infof("maxItemSize is effect in sts observability-thanos-query-frontend-memcached") + return true + + }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) + }) + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable](config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { From 04090033ab7e130cbd3732a4a230176fb4bf2154 Mon Sep 17 00:00:00 
2001 From: Chang Liang Qu Date: Fri, 21 Apr 2023 11:20:40 +0800 Subject: [PATCH 059/150] remove result files which are not useable Signed-off-by: Chang Liang Qu --- tests/pkg/tests/results.xml.arm | 98 -------------------------------- tests/pkg/tests/results.xml.fips | 94 ------------------------------ tests/pkg/tests/results.xml.rhv | 59 ------------------- tests/pkg/tests/results.xml.sno | 90 ----------------------------- 4 files changed, 341 deletions(-) delete mode 100644 tests/pkg/tests/results.xml.arm delete mode 100644 tests/pkg/tests/results.xml.fips delete mode 100644 tests/pkg/tests/results.xml.rhv delete mode 100644 tests/pkg/tests/results.xml.sno diff --git a/tests/pkg/tests/results.xml.arm b/tests/pkg/tests/results.xml.arm deleted file mode 100644 index 8e80217de..000000000 --- a/tests/pkg/tests/results.xml.arm +++ /dev/null @@ -1,98 +0,0 @@ - - - - - - - - - - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:33 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:35 - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:47 Skip the case due to MCO CR was created customized /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:49 - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:155 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:165 - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:192 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:200 - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:237 Skip the case since the MCO CR did not have advanced spec configed /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_config_test.go:247 - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:41 Unexpected error: <*errors.errorString | 0xc00059e610>: { s: "the MCO CR did not have observabilityAddonSpec.resources spec configed", } the MCO CR did not have observabilityAddonSpec.resources spec configed occurred /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:44 - �[1mSTEP�[0m: Check addon resource requirement - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:53 Timed out after 300.000s. 
Expected success, but got an error: <*errors.errorString | 0xc0006414f0>: { s: "metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}>", } metrics-collector-deployment resource <{map[] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> is not equal <{map[cpu:{{200 -3} {<nil>} 200m DecimalSI} memory:{{734003200 0} {<nil>} 700Mi BinarySI}] map[cpu:{{10 -3} {<nil>} 10m DecimalSI} memory:{{104857600 0} {<nil>} 100Mi BinarySI}]}> /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:57 - �[1mSTEP�[0m: Check metrics-collector resource requirement - - - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:119 Timed out after 1200.001s. Expected success, but got an error: <*errors.StatusError | 0xc0002d68c0>: { ErrStatus: { TypeMeta: {Kind: "Status", APIVersion: "v1"}, ListMeta: { SelfLink: "", ResourceVersion: "", Continue: "", RemainingItemCount: nil, }, Status: "Failure", Message: "observabilityaddons.observability.open-cluster-management.io \"observability-addon\" not found", Reason: "NotFound", Details: { Name: "observability-addon", Group: "observability.open-cluster-management.io", Kind: "observabilityaddons", UID: "", Causes: nil, RetryAfterSeconds: 0, }, Code: 404, }, } observabilityaddons.observability.open-cluster-management.io "observability-addon" not found /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:145 - �[1mSTEP�[0m: Waiting for MCO addon components ready �[1mSTEP�[0m: Checking the status in managedclusteraddon reflects the endpoint operator status correctly - - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:174 Timed out after 300.002s. Expected <bool>: false to be true /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_addon_test.go:186 - �[1mSTEP�[0m: Waiting for MCO addon components scales to 0 - - - - - - - - - - - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_alert_test.go:278 Timed out after 300.001s. Expected success, but got an error: <*errors.errorString | 0xc000631260>: { s: "Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster", } Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_alert_test.go:377 - �[1mSTEP�[0m: Checking Watchdog alerts are forwarded to the hub - - - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_test.go:28 Timed out after 360.068s. Expected success, but got an error: <*errors.errorString | 0xc000be8a70>: { s: "Failed to find metric name from response", } Failed to find metric name from response /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_grafana_test.go:46 - - - /Users/cqu/ACM/obs/observability_core_automation/tests/pkg/tests/observability_metrics_test.go:51 Timed out after 600.000s. 
[deleted junit test report content, truncated: custom-allowlist metric lookup failures, a grafana-dev script failure with exit status 1, skipped reconcile cases, and observability-alertmanager statefulset replica failures]
\ No newline at end of file
diff --git a/tests/pkg/tests/results.xml.fips b/tests/pkg/tests/results.xml.fips deleted file mode 100644 index 4d1441949..000000000 --- a/tests/pkg/tests/results.xml.fips +++ /dev/null @@ -1,94 +0,0 @@
[deleted junit test report for the FIPS run: metric lookup timeouts, "no endpoints available" errors from the multicluster-observability-webhook-service, addon scale-to-zero and grafana-dev failures, an alertmanager storage-resize timeout, a Watchdog alert-forwarding failure, an interface-conversion panic in observability_config_test.go, and cert-renew, observatorium-preserve, and observability-observatorium-api replica failures]
\ No newline at end of file
diff --git a/tests/pkg/tests/results.xml.rhv b/tests/pkg/tests/results.xml.rhv deleted file mode 100644 index 3c0e71480..000000000 --- a/tests/pkg/tests/results.xml.rhv +++ /dev/null @@ -1,59 +0,0 @@
[deleted junit test report for the RHV run: an addon scale-to-zero timeout and a grafana-dev script failure with exit status 1]
\ No newline at end of file
diff --git a/tests/pkg/tests/results.xml.sno b/tests/pkg/tests/results.xml.sno deleted file mode 100644 index bbc65cb64..000000000 --- a/tests/pkg/tests/results.xml.sno +++ /dev/null @@ -1,90 +0,0 @@
[deleted junit test report for the SNO run: addon and metrics-collector resource-requirement mismatches, an addon scale-to-zero timeout, a thanos-compact replica failure, skipped config cases, a metric lookup timeout, an alertmanagerStorageSize update rejected as forbidden by the validating webhook, an alertmanager storage-resize timeout, and a grafana-dev script failure]
\ No newline at end of file
From 8fe169fbdb4dedd1b81f2a7f238ce5965c93dfa1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 5 May 2023 08:22:43 +0800 Subject: [PATCH 060/150] udpate receive replica to 3 and remove limitation Signed-off-by: Chang Liang Qu --- examples/mco/e2e/v1beta2/custom-certs/observability.yaml | 6 +----- examples/mco/e2e/v1beta2/observability.yaml | 6 +----- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index a1f1f3951..8126c315c 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -41,11 +41,7 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - resources: - limits: - cpu: 1 - memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 13588d5b4..2f89cad22 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -41,11 +41,7 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - resources: - limits: - cpu: 1 - memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: From 85e4f753d4cb5b04127d596d16e6ba658dff90b3 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 5 May 2023 08:27:22 +0800 Subject: [PATCH 061/150] update receive replica to 3 and remove limitation2 Signed-off-by: Chang Liang Qu --- examples/export/v1beta2/custom-certs/observability.yaml | 6 +----- examples/export/v1beta2/observability.yaml | 6 +----- .../updatemcocr/v1beta2-observability-maxitemsize.yaml | 6 +----- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml index da76fef20..2c3d38c71 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++
b/examples/export/v1beta2/custom-certs/observability.yaml @@ -41,11 +41,7 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - resources: - limits: - cpu: 1 - memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index 7eec99531..ce3763ee1 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -41,11 +41,7 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - resources: - limits: - cpu: 1 - memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 7f6e636ec..054d3103f 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -41,11 +41,7 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - resources: - limits: - cpu: 1 - memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: From 9177d73ae48aec39db028afa045933548d70029f Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 11 May 2023 18:24:46 +0800 Subject: [PATCH 062/150] reduce some deployment component replica to 2 Signed-off-by: Chang Liang Qu --- .../v1beta2/custom-certs/observability.yaml | 18 +++--------------- examples/export/v1beta2/observability.yaml | 18 +++--------------- .../v1beta2-observability-maxitemsize.yaml | 18 +++--------------- .../v1beta2/custom-certs/observability.yaml | 18 +++--------------- examples/mco/e2e/v1beta2/observability.yaml | 18 +++--------------- 5 files changed, 15 insertions(+), 75 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml index 2c3d38c71..d9e711dc6 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -20,17 +20,9 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 query: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -91,11 +83,7 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 3 - resources: - limits: - cpu: 50m - memory: 200Mi + replicas: 2 nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index ce3763ee1..c8049d9a2 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -20,17 +20,9 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 query: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -91,11 +83,7 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 3 - resources: - limits: - cpu: 50m - memory: 200Mi + replicas: 2 nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml 
b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 054d3103f..5569d0c63 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -20,17 +20,9 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 query: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -91,11 +83,7 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 3 - resources: - limits: - cpu: 50m - memory: 200Mi + replicas: 2 nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index 8126c315c..823ce6c39 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -20,17 +20,9 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 query: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -91,11 +83,7 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 3 - resources: - limits: - cpu: 50m - memory: 200Mi + replicas: 2 nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 2f89cad22..5fe8410d6 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -20,17 +20,9 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 query: - resources: - limits: - cpu: 1 - memory: 1Gi - replicas: 3 + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -91,11 +83,7 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 3 - resources: - limits: - cpu: 50m - memory: 200Mi + replicas: 2 nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: From e7eac3e58198ed993a70ed2173bd427b2256c3fc Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 16 May 2023 08:57:31 +0800 Subject: [PATCH 063/150] extend resource quota to avoid touch limitation Signed-off-by: Chang Liang Qu --- examples/policy/resourceQuota.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/policy/resourceQuota.yaml b/examples/policy/resourceQuota.yaml index 1c439b434..d880b4909 100644 --- a/examples/policy/resourceQuota.yaml +++ b/examples/policy/resourceQuota.yaml @@ -6,4 +6,4 @@ metadata: spec: hard: cpu: "5.25" - memory: "12Gi" + memory: "15Gi" From 89bf6f7774bd80c57d8448affe089225022df8f9 Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Fri, 19 May 2023 11:27:50 +0800 Subject: [PATCH 064/150] Revert "[main] Receive replica limit" --- examples/export/v1beta2/custom-certs/observability.yaml | 6 +++++- examples/export/v1beta2/observability.yaml | 6 +++++- .../updatemcocr/v1beta2-observability-maxitemsize.yaml | 6 +++++- examples/mco/e2e/v1beta2/custom-certs/observability.yaml | 6 +++++- examples/mco/e2e/v1beta2/observability.yaml | 6 +++++- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml 
index d9e711dc6..ca6f13db2 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -33,7 +33,11 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - replicas: 3 + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index c8049d9a2..474f16a88 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -33,7 +33,11 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - replicas: 3 + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 5569d0c63..2564fe67e 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -33,7 +33,11 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - replicas: 3 + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index 823ce6c39..c29b753b0 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -33,7 +33,11 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - replicas: 3 + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 5fe8410d6..11b7d524c 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -33,7 +33,11 @@ spec: serviceAccountAnnotations: test.com/role-arn: 's3_role' receive: - replicas: 3 + resources: + limits: + cpu: 1 + memory: 4Gi + replicas: 2 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: From 63542a7e855d56bbcd496f3f8f82cf3543dd8cd8 Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Fri, 19 May 2023 11:30:21 +0800 Subject: [PATCH 065/150] Revert "reduce some deployment component replica to 2" --- .../v1beta2/custom-certs/observability.yaml | 18 +++++++++++++++--- examples/export/v1beta2/observability.yaml | 18 +++++++++++++++--- .../v1beta2-observability-maxitemsize.yaml | 18 +++++++++++++++--- .../v1beta2/custom-certs/observability.yaml | 18 +++++++++++++++--- examples/mco/e2e/v1beta2/observability.yaml | 18 +++++++++++++++--- 5 files changed, 75 insertions(+), 15 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml index ca6f13db2..da76fef20 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -20,9 +20,17 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 query: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 
's3_role' compact: @@ -87,7 +95,11 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 2 + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index 474f16a88..7eec99531 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -20,9 +20,17 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 query: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -87,7 +95,11 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 2 + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 2564fe67e..7f6e636ec 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -20,9 +20,17 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 query: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -87,7 +95,11 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 2 + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index c29b753b0..a1f1f3951 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -20,9 +20,17 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 query: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -87,7 +95,11 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 2 + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 11b7d524c..13588d5b4 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -20,9 +20,17 @@ spec: memory: 1Gi replicas: 3 queryFrontend: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 query: - replicas: 2 + resources: + limits: + cpu: 1 + memory: 1Gi + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' compact: @@ -87,7 +95,11 @@ spec: cpu: 100m memory: 400Mi rbacQueryProxy: - replicas: 2 + replicas: 3 + resources: + limits: + cpu: 50m + memory: 200Mi nodeSelector: kubernetes.io/os: linux observabilityAddonSpec: From 7cb669be5e044f1d54f338d52e523e0ab127e170 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 30 May 2023 11:45:14 +0800 Subject: [PATCH 066/150] update files for main CI Signed-off-by: Chang Liang Qu --- Dockerfile.interop | 19 ++++++++++++++++++- 
execute_obs_interop_commands.sh | 22 ++++++++++++++++------ 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/Dockerfile.interop b/Dockerfile.interop index 09a9bd9bd..73efd12fd 100644 --- a/Dockerfile.interop +++ b/Dockerfile.interop @@ -3,7 +3,8 @@ FROM quay.io/vboulos/acmqe-automation/ginkgo_1_14_2-linux-go -# Copy the GRC repo repo into /tmp/grc folder + +# Copy the Obs repo repo into /tmp/obs folder RUN mkdir /tmp/obs WORKDIR /tmp/obs COPY . . @@ -11,4 +12,20 @@ COPY . . # good colors for most applications ENV TERM=xterm +# Set required permissions for OpenShift usage +RUN chgrp -R 0 /tmp && \ + chmod -R g=u /tmp + +RUN mkdir -p /go +RUN chgrp -R 0 /go && \ + chmod -R g=u /go + +RUN mkdir -p ~/.kube +RUN chgrp -R 0 ~/.kube && \ + chmod -R g=u ~/.kube + +RUN mkdir -p /alabama/.kube +RUN chgrp -R 0 /alabama/.kube && \ + chmod -R g=u /alabama/.kube + CMD ["/bin/bash"] \ No newline at end of file diff --git a/execute_obs_interop_commands.sh b/execute_obs_interop_commands.sh index 71724562b..04989512c 100644 --- a/execute_obs_interop_commands.sh +++ b/execute_obs_interop_commands.sh @@ -27,9 +27,11 @@ if [[ -n ${PARAM_AWS_SECRET_ACCESS_KEY} ]]; then export AWS_SECRET_ACCESS_KEY=${PARAM_AWS_SECRET_ACCESS_KEY} fi -if [[ ${!USE_MINIO} == false ]]; then - export IS_CANARY_ENV=true -fi +# if [[ ${!USE_MINIO} == "false" ]]; then +# export IS_CANARY_ENV=true +# fi + +export IS_CANARY_ENV=true if [[ -z ${HUB_CLUSTER_NAME} || -z ${BASE_DOMAIN} || -z ${OC_CLUSTER_USER} || -z ${OC_HUB_CLUSTER_PASS} || -z ${OC_HUB_CLUSTER_API_URL} ]]; then echo "Aborting test.. OCP HUB details are required for the test execution" @@ -41,9 +43,17 @@ else export MAKUBECONFIG=~/.kube/managed_kubeconfig fi set +x - oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL - set -x + oc login --insecure-skip-tls-verify -u $OC_CLUSTER_USER -p $OC_HUB_CLUSTER_PASS $OC_HUB_CLUSTER_API_URL + set -x + + oc config view --minify --raw=true > userfile + //cat userfile + whoami + rm -rf ~/.kube/config + cp userfile ~/.kube/config + //cat ~/.kube/config export KUBECONFIG=~/.kube/config + go mod vendor && ginkgo build ./tests/pkg/tests/ cd tests cp resources/options.yaml.template resources/options.yaml @@ -54,4 +64,4 @@ else /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"$MAKUBECONFIG"'"' resources/options.yaml cat resources/options.yaml ginkgo -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 -fi \ No newline at end of file +fi From 2045f2a8c10b8f815a92abc54e23de64b4c7a31c Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 6 Jun 2023 11:01:16 +0800 Subject: [PATCH 067/150] rename to junit_results.xml for CI Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability-e2e-test_suite_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index c982de04c..93f7f4f0a 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -106,7 +106,7 @@ func init() { flag.StringVar( &reportFile, "report-file", - "results.xml", + "junit_results.xml", "Provide the path to where the junit results will be printed.", ) flag.StringVar( From a5b1fe274fc1a89892dedbf8a8c60aa52cd31d54 Mon Sep 17 00:00:00 2001 From: ChangLiang Qu Date: Thu, 8 Jun 2023 14:32:16 +0800 Subject: [PATCH 068/150] Update OWNERS --- OWNERS | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff 
--git a/OWNERS b/OWNERS index 519e50c83..84398a5cc 100644 --- a/OWNERS +++ b/OWNERS @@ -1,8 +1,5 @@ approvers: -- clyang82 -- marcolan018 -- morvencao -- songleo +- quchangl-github reviewers: - haoqing0110 From eb5fbdabb74c4ac604ba328ad9ab9290bb8926c3 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 25 Jun 2023 11:08:21 +0800 Subject: [PATCH 069/150] remove sensistive info Signed-off-by: Chang Liang Qu --- Jenkinsfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 5bb274fbb..c67ba127d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -70,11 +70,15 @@ pipeline { exit 1 else if [[ -n "${params.MANAGED_CLUSTER_USER}" && -n "${params.MANAGED_CLUSTER_PASS}" && -n "${params.MANAGED_CLUSTER_API_URL}" ]]; then + set +x oc login --insecure-skip-tls-verify -u \$MANAGED_CLUSTER_USER -p \$MANAGED_CLUSTER_PASS \$MANAGED_CLUSTER_API_URL + set -x oc config view --minify --raw=true > ~/.kube/managed_kubeconfig export MAKUBECONFIG=~/.kube/managed_kubeconfig fi + set +x oc login --insecure-skip-tls-verify -u \$OC_CLUSTER_USER -p \$OC_HUB_CLUSTER_PASS \$OC_HUB_CLUSTER_API_URL + set -x export KUBECONFIG=~/.kube/config go mod vendor && ginkgo build ./tests/pkg/tests/ cd tests From 6009c891021b1f0a95336ec40a3ca10862946ed4 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 25 Jun 2023 15:12:34 +0800 Subject: [PATCH 070/150] remove further password information Signed-off-by: Chang Liang Qu --- Jenkinsfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index c67ba127d..533c6b79c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -39,14 +39,18 @@ pipeline { sh """ export CLOUD_PROVIDER="${params.CLOUD_PROVIDER}" export OC_CLUSTER_USER="${params.OC_CLUSTER_USER}" + set +x export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" + set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" export HUB_CLUSTER_NAME="${params.HUB_CLUSTER_NAME}" export BASE_DOMAIN="${params.BASE_DOMAIN}" export MANAGED_CLUSTER_NAME="${params.MANAGED_CLUSTER_NAME}" export MANAGED_CLUSTER_BASE_DOMAIN="${params.MANAGED_CLUSTER_BASE_DOMAIN}" export MANAGED_CLUSTER_USER="${params.MANAGED_CLUSTER_USER}" + set +x export MANAGED_CLUSTER_PASS="${params.MANAGED_CLUSTER_PASS}" + set -x export MANAGED_CLUSTER_API_URL="${params.MANAGED_CLUSTER_API_URL}" export BUCKET="${params.BUCKET}" export REGION="${params.REGION}" From 9af7e12646f2ab495942a5d19e644b7411f48536 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 29 Jun 2023 13:51:14 +0800 Subject: [PATCH 071/150] increase grafana-dev retry time period Signed-off-by: Chang Liang Qu --- tests/grafana-dev-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/grafana-dev-test.sh b/tests/grafana-dev-test.sh index c5f1c4e53..4e612c816 100755 --- a/tests/grafana-dev-test.sh +++ b/tests/grafana-dev-test.sh @@ -24,7 +24,7 @@ do kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev | grep "3/3" | grep "Running" && break n=$((n+1)) echo "Retrying in 10s for waiting for grafana-dev pod ready ..." 
- sleep 10 + sleep 60 done if [ $n -eq 30 ]; then From 6fca3840147b25e43d11b6e11d978c66b7c889ef Mon Sep 17 00:00:00 2001 From: OpenShift Cherrypick Robot Date: Fri, 21 Jul 2023 02:54:44 +0000 Subject: [PATCH 072/150] import case to ignore case sensitive of vmware (#185) Signed-off-by: Chang Liang Qu Co-authored-by: Chang Liang Qu --- tests/pkg/tests/observability_reconcile_test.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 597c50050..fd1fedbf8 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -43,7 +43,10 @@ var _ = Describe("", func() { }) It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { - if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring := "vmware" + if strings.Contains(cloudProvider, substring) { + //if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { Skip("Skip the case due to it's not supported on the VMWARE") } By("Modifying MCO CR for reconciling") @@ -134,7 +137,10 @@ var _ = Describe("", func() { }) It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { - if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring := "vmware" + if strings.Contains(cloudProvider, substring) { + //if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { Skip("Skip the case due to it's not supported on the VMWARE") } By("Resizing alertmanager storage") @@ -153,7 +159,10 @@ var _ = Describe("", func() { }) It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { - if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring := "vmware" + if strings.Contains(cloudProvider, substring) { + //if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { Skip("Skip the case due to it's not supported on the VMWARE") } advRetentionCon, err := utils.CheckAdvRetentionConfig(testOptions) From 24249596403588f02cfaa6231a9160cdd79ccebb Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 21 Jul 2023 10:38:05 +0800 Subject: [PATCH 073/150] skip the extanding pvc case on IBM cloud Signed-off-by: Chang Liang Qu --- .../pkg/tests/observability_reconcile_test.go | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index fd1fedbf8..11df8b777 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -44,10 +44,11 @@ var _ = Describe("", func() { It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) - substring := "vmware" - if strings.Contains(cloudProvider, substring) { + substring1 := "vmware" + substring2 := 
"ibm" + if strings.Contains(cloudProvider, substring1) || strings.Contains(cloudProvider, substring2) { //if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { - Skip("Skip the case due to it's not supported on the VMWARE") + Skip("Skip the case due to it's not supported on the VMWARE and IBM") } By("Modifying MCO CR for reconciling") err := utils.ModifyMCOCR(testOptions) @@ -138,10 +139,11 @@ var _ = Describe("", func() { It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) - substring := "vmware" - if strings.Contains(cloudProvider, substring) { + substring1 := "vmware" + substring2 := "ibm" + if strings.Contains(cloudProvider, substring1) || strings.Contains(cloudProvider, substring2) { //if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { - Skip("Skip the case due to it's not supported on the VMWARE") + Skip("Skip the case due to it's not supported on the VMWARE and IBM") } By("Resizing alertmanager storage") alertmans, _ := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -160,10 +162,11 @@ var _ = Describe("", func() { It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) - substring := "vmware" - if strings.Contains(cloudProvider, substring) { + substring1 := "vmware" + substring2 := "ibm" + if strings.Contains(cloudProvider, substring1) || strings.Contains(cloudProvider, substring2) { //if strings.Contains(string(os.Getenv("CLOUD_PROVIDER")), "VMWARE") { - Skip("Skip the case due to it's not supported on the VMWARE") + Skip("Skip the case due to it's not supported on the VMWARE and IBM") } advRetentionCon, err := utils.CheckAdvRetentionConfig(testOptions) if !advRetentionCon { From 17e1c3863ea290837bf133876c0719ab7de3bcf1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 3 Aug 2023 16:53:01 +0800 Subject: [PATCH 074/150] add tag for ocpInterop Signed-off-by: Chang Liang Qu --- Jenkinsfile | 6 ++++++ tests/pkg/tests/observability_addon_test.go | 12 ++++++------ tests/pkg/tests/observability_alert_test.go | 18 +++++++++--------- .../pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 10 +++++----- .../pkg/tests/observability_dashboard_test.go | 6 +++--- .../pkg/tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- .../tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 ++++++------ ...bservability_observatorium_preserve_test.go | 2 +- tests/pkg/tests/observability_route_test.go | 2 +- 14 files changed, 48 insertions(+), 42 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 533c6b79c..a4b9b3c45 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -26,6 +26,7 @@ pipeline { password(name:'AWS_SECRET_ACCESS_KEY', defaultValue: '', description: 'AWS secret access key') string(name:'SKIP_INSTALL_STEP', defaultValue: 'false', description: 'Skip Observability installation') string(name:'SKIP_UNINSTALL_STEP', defaultValue: 'true', description: 'Skip Observability uninstallation') + string(name:'TAGGING', defaultValue: '', description: 'with 
tagging value to run the specific test cases') string(name:'USE_MINIO', defaultValue: 'false', description: 'If no AWS S3 bucket, you could use minio as object storage to instead') } environment { @@ -56,6 +57,7 @@ pipeline { export REGION="${params.REGION}" export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" + export TAGGING="${params.TAGGING}" if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" @@ -93,7 +95,11 @@ pipeline { /usr/local/bin/yq e -i '.options.clusters.baseDomain="'"\$MANAGED_CLUSTER_BASE_DOMAIN"'"' resources/options.yaml /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"\$MAKUBECONFIG"'"' resources/options.yaml cat resources/options.yaml + if [[ -n "${params.TAGGING}" ]]; then + ginkgo --focus="\$MANAGED_CLUSTER_USER" -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + else ginkgo -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + fi fi """ } diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 4af9cd9ba..41f900891 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability] (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability][ocpInterop] (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -118,7 +118,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability][ocpInterop] (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability] (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability][ocpInterop] (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable] (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify 
metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable][ocpInterop] (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) @@ -172,7 +172,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable] (deploy/g1)", func() { + It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable][ocpInterop] (deploy/g1)", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) @@ -186,7 +186,7 @@ var _ = Describe("", func() { } }) - Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability] (addon/g1) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability][ocpInterop] (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 31bebd478..5303111fb 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -50,7 +50,7 @@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). 
@@ -75,7 +75,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -84,7 +84,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -106,7 +106,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -133,7 +133,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -204,7 +204,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable] (alert/g0)", func() { 
+ It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -234,7 +234,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable] (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -275,7 +275,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration] (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration][ocpInterop] (alertforward/g0)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 84ec7ef7d..90c184bfd 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration] (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration][ocpInterop] (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index e6c08c5f9..d665e5f2c 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable](config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable][ocpInterop](config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 +71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, 
EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable](config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable][ocpInterop](config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable](config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable][ocpInterop](config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable] (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration] Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration][ocpInterop] Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 773bdf1c2..509b342cf 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable][ocpInterop] (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable][ocpInterop] (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable] (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable][ocpInterop] (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index d4aff3750..557e85e6e 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable][ocpInterop] (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable] (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable][ocpInterop] (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { err = 
utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable] (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable][ocpInterop] (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index e2516d03c..2940554f2 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability] (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability][ocpInterop] (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable] (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable][ocpInterop] (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable] (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable][ocpInterop] (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index d6f3c11dc..15a4e582f 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration] (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana 
develop instance [P1][Sev1][Observability][Integration][ocpInterop] (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 1b118af09..3b910b52d 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable] (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable] (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable][ocpInterop] (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 8236783e5..1c1ecc03f 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork [ocpInterop] (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 606a98eda..7f2368561 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration] (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration] (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { 
By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration] (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). 
@@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration] (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration][ocpInterop] (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 91584940a..449842f4d 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability] (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability][ocpInterop] (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index d958e15a4..22c451d27 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration] Should access metrics via rbac-query-proxy route (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration][ocpInterop] Should access metrics via rbac-query-proxy route (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain + query From 13a45148b248846e7ced4a6ab75dd55347f71565 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 3 Aug 2023 16:57:48 +0800 Subject: [PATCH 075/150] update focus value Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index a4b9b3c45..a3f51ecad 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -96,7 +96,7 @@ pipeline { /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"\$MAKUBECONFIG"'"' resources/options.yaml cat resources/options.yaml if [[ -n "${params.TAGGING}" ]]; then - ginkgo --focus="\$MANAGED_CLUSTER_USER" -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + ginkgo --focus="\$TAGGING" -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 else ginkgo -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 fi From 6403d1c39b218d254820f0cedab3de072b208b84 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 23 Aug 2023 18:04:59 +0800 Subject: [PATCH 076/150] update tag with @ocpInterop Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 12 ++++++------ tests/pkg/tests/observability_alert_test.go | 18 +++++++++--------- .../pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 10 +++++----- .../pkg/tests/observability_dashboard_test.go | 6 +++--- .../pkg/tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- .../tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 ++++++------ ...bservability_observatorium_preserve_test.go | 2 +- tests/pkg/tests/observability_route_test.go | 2 +- 13 files changed, 42 insertions(+), 42 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 41f900891..fcd2c4521 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability][ocpInterop] (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -118,7 +118,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability][ocpInterop] (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - 
It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability][ocpInterop] (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable][ocpInterop] (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) @@ -172,7 +172,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable][ocpInterop] (deploy/g1)", func() { + It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop (deploy/g1)", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) @@ -186,7 +186,7 @@ var _ = Describe("", func() { } }) - Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability][ocpInterop] (addon/g1) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 5303111fb..233dd40a5 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -50,7 +50,7 @@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). 
@@ -75,7 +75,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -84,7 +84,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -106,7 +106,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -133,7 +133,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -204,7 +204,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated 
[P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -234,7 +234,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable][ocpInterop] (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -275,7 +275,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration][ocpInterop] (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop (alertforward/g0)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 90c184bfd..3b47aa750 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration][ocpInterop] (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index d665e5f2c..c4fcb4e8f 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable][ocpInterop](config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 
+71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable][ocpInterop](config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable][ocpInterop](config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop(config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration][ocpInterop] Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 509b342cf..838a20499 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable][ocpInterop] (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable][ocpInterop] (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable][ocpInterop] (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 557e85e6e..e66b7bbfa 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable][ocpInterop] (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable][ocpInterop] (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector 
pods are ready") Eventually(func() error { err = utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable][ocpInterop] (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index 2940554f2..d6bbebf8a 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability][ocpInterop] (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable][ocpInterop] (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable][ocpInterop] (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 15a4e582f..6023d19c2 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance 
[P1][Sev1][Observability][Integration][ocpInterop] (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 3b910b52d..21c3ab7b8 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable][ocpInterop] (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable][ocpInterop] (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 1c1ecc03f..0f5b22155 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork [ocpInterop] (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 7f2368561..8685bb3e7 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected 
[P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration][ocpInterop] (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). 
@@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration][ocpInterop] (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 449842f4d..386803f4b 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability][ocpInterop] (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 22c451d27..139742d97 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration][ocpInterop] Should access metrics via rbac-query-proxy route (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop Should access metrics via rbac-query-proxy route (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain + query From 9badb9928de82c11f3a65606edfa849ce6186574 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 30 Aug 2023 16:46:51 +0800 Subject: [PATCH 077/150] add tag for @e2e Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 8 ++++---- tests/pkg/tests/observability_alert_test.go | 20 +++++++++---------- .../pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 14 ++++++------- .../pkg/tests/observability_dashboard_test.go | 6 +++--- .../tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- tests/pkg/tests/observability_export_test.go | 2 +- .../tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 +++++------ ...servability_observatorium_preserve_test.go | 2 +- .../pkg/tests/observability_reconcile_test.go | 10 +++++----- .../pkg/tests/observability_retention_test.go | 10 +++++----- tests/pkg/tests/observability_route_test.go | 2 +- 16 files changed, 54 insertions(+), 54 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index fcd2c4521..6be872751 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @e2e (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -118,7 +118,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) 
- It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @e2e (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 233dd40a5..27f9bf594 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -50,7 +50,7 @@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). @@ -75,7 +75,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -84,7 +84,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -106,7 +106,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager 
configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -133,7 +133,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -204,7 +204,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -234,7 +234,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -275,7 +275,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @e2e (alertforward/g0)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain, @@ -378,7 +378,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-22427: Observability: Disable the managedcluster's alerts forward to the Hub [P2][Sev2][Observability][Integration] (alertforward/g1)", func() { + It("RHACM4K-22427: Observability: Disable the managedcluster's alerts forward to the Hub [P2][Sev2][Observability][Integration] @e2e (alertforward/g1)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 3b47aa750..d7d0d965c 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @e2e (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index c4fcb4e8f..7e4792a00 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop(config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @e2e(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 +71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop(config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @e2e(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop(config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - 
[P1][Sev1][Observability][Stable]@ocpInterop @e2e(config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -236,7 +236,7 @@ var _ = Describe("", func() { }, } - It("RHACM4K-2822: Observability: Verify the replica in advanced config for Observability components @BVT - [P1][Sev1][Observability][Integration] (config/g0)", func() { + It("RHACM4K-2822: Observability: Verify the replica in advanced config for Observability components @BVT - [P1][Sev1][Observability][Integration] @e2e (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) @@ -273,7 +273,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] (config/g0)", func() { + It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] @e2e (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @e2e Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 838a20499..3a5d9ed60 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @e2e (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index e66b7bbfa..d264df57f 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (deployment/g0)", func() { By("Check endpoint-operator 
and metrics-collector pods are ready") Eventually(func() error { err = utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index d6bbebf8a..aae6e7950 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @e2e (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @e2e (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @e2e (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index fffbde32a..6f27a1aaf 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -38,7 +38,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-11170: Observability: Verify metrics would be exported to corp tools(2.5)(draft)[P2][Sev2][observability][Integration] Should have acm_remote_write_requests_total metrics with 
correct labels/value (export/g0)", func() { + It("RHACM4K-11170: Observability: Verify metrics would be exported to corp tools(2.5)(draft)[P2][Sev2][observability][Integration] Should have acm_remote_write_requests_total metrics with correct labels/value @e2e (export/g0)", func() { By("Adding victoriametrics deployment/service/secret") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/export"}) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 6023d19c2..26ae0bc11 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @e2e (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 21c3ab7b8..fca13f193 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 0f5b22155..5fed8d7c2 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @e2e (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := 
utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 8685bb3e7..51d302edb 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). 
@@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @e2e (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 386803f4b..c608f9a9c 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @e2e (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 11df8b777..8d7cea9ab 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -42,7 +42,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -105,7 +105,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { By("Checking node selector spec in MCO CR") mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { By("Checking podAntiAffinity for all pods") Eventually(func() error { err := utils.CheckAllPodsAffinity(testOptions) @@ -137,7 +137,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -160,7 +160,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index 59de8110e..cd9a575c5 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -63,7 +63,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { By("--delete-delay=" + deleteDelay) Eventually(func() error { compacts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -82,7 +82,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { By("--ignore-deletion-marks-delay=" + ignoreDeletionMarksDelay) Eventually(func() error { stores, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -104,7 +104,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup 
retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { receives, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { By("--tsdb.block-duration=" + blockDuration) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 139742d97..26f68299d 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop Should access metrics via rbac-query-proxy route (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain + query From 585a84bd3b18a0af4a8ebc85ef04aebbc5de4e3d Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 30 Aug 2023 16:58:17 +0800 Subject: [PATCH 078/150] add tag for tag @post-release Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 4 ++-- tests/pkg/tests/observability_alert_test.go | 16 ++++++++-------- tests/pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 12 ++++++------ tests/pkg/tests/observability_dashboard_test.go | 6 +++--- tests/pkg/tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- .../pkg/tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../pkg/tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 ++++++------ .../observability_observatorium_preserve_test.go | 2 +- tests/pkg/tests/observability_reconcile_test.go | 10 +++++----- tests/pkg/tests/observability_retention_test.go | 10 +++++----- 14 files changed, 47 insertions(+), 47 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 6be872751..136316b9d 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e @post-release (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @e2e (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @e2e @post-release (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 27f9bf594..a451580e3 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -50,7 +50,7 @@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule 
exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). @@ -75,7 +75,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -84,7 +84,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -106,7 +106,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -133,7 +133,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -204,7 +204,7 @@ var _ = Describe("", func() 
{ klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -234,7 +234,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @e2e (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index d7d0d965c..0bed8c05f 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @e2e (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @e2e @post-release (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 7e4792a00..84742c291 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @e2e(config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 +71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @e2e(config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - 
[P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e(config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release(config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -273,7 +273,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] @e2e (config/g0)", func() { + It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] @e2e @post-release (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @e2e Checking service account annotations is set for store/query/rule/compact/receive (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @e2e Checking service account annotations is set for store/query/rule/compact/receive @post-release (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 3a5d9ed60..1800602ec 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @e2e (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index d264df57f..d64ef9a04 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - 
[P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { err = utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index aae6e7950..a6133b011 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @e2e (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @e2e @post-release (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @e2e (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @e2e (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 26ae0bc11..2fb95df3b 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // 
If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @e2e (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @e2e @post-release (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index fca13f193..fa55da410 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @e2e (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 5fed8d7c2..db51c1e88 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @e2e (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @e2e @post-release (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 51d302edb..b670c1daa 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release 
(metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). 
@@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @e2e (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index c608f9a9c..996c08e4f 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @e2e (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @e2e @post-release (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 8d7cea9ab..d21abd4f8 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -42,7 +42,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -105,7 +105,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { + It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { By("Checking node selector spec in MCO CR") mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { + It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { By("Checking podAntiAffinity for all pods") Eventually(func() error { err := utils.CheckAllPodsAffinity(testOptions) @@ -137,7 +137,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { + It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -160,7 +160,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index cd9a575c5..da1163bb7 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -63,7 +63,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { By("--delete-delay=" + deleteDelay) Eventually(func() error { compacts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -82,7 +82,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { By("--ignore-deletion-marks-delay=" + ignoreDeletionMarksDelay) Eventually(func() error { stores, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -104,7 +104,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args 
[P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { receives, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { By("--tsdb.block-duration=" + blockDuration) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ From 0bc9ee9ed5729ddd4dce2523c96b5c9c689c95c3 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 30 Aug 2023 17:01:39 +0800 Subject: [PATCH 079/150] add tag for @pre-upgrade Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 8 ++++---- tests/pkg/tests/observability_config_test.go | 4 ++-- tests/pkg/tests/observability_dashboard_test.go | 6 +++--- tests/pkg/tests/observability_deployment_test.go | 6 +++--- tests/pkg/tests/observability_metrics_test.go | 12 ++++++------ 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 136316b9d..ba0f996a0 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @e2e (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @e2e @pre-upgrade (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -118,7 +118,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for 
Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e @post-release (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e @post-release @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @e2e @post-release (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 84742c291..95e0bcbe3 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -273,7 +273,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] @e2e @post-release (config/g0)", func() { + It("RHACM4K-3419: Observability: Persist advance values in MCO CR - Checking resources in advanced config [P2][Sev2][Observability][Integration] @e2e @post-release @pre-upgrade (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) if err != nil { panic(err.Error()) @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @e2e Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). 
Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 1800602ec..32d378009 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index d64ef9a04..57a983932 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (deployment/g0)", func() 
{ + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { err = utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index b670c1daa..0ea7f6ebe 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop 
@e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). @@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( From da46100c76433f7b934313d3cd25a04ea97d5026 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 30 Aug 2023 17:21:42 +0800 Subject: [PATCH 080/150] add tag for @post-upgrade Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 12 ++++++------ tests/pkg/tests/observability_alert_test.go | 18 +++++++++--------- .../pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 10 +++++----- .../pkg/tests/observability_dashboard_test.go | 6 +++--- .../pkg/tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- .../tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 ++++++------ ...bservability_observatorium_preserve_test.go | 2 +- .../pkg/tests/observability_reconcile_test.go | 10 +++++----- .../pkg/tests/observability_retention_test.go | 10 +++++----- tests/pkg/tests/observability_route_test.go | 2 +- 15 files changed, 52 insertions(+), 52 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index ba0f996a0..951f71cf2 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @e2e @pre-upgrade (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @post-upgrade @e2e @pre-upgrade (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, 
err := utils.GetMCOAddonSpecResources(testOptions) @@ -118,7 +118,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @e2e @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @e2e @post-release @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) @@ -172,7 +172,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop (deploy/g1)", func() { + It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade (deploy/g1)", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) @@ -186,7 +186,7 @@ var _ = Describe("", func() { } }) - Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop (addon/g1) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop @post-upgrade (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index a451580e3..cc0876f6b 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -50,7 +50,7 
@@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). @@ -75,7 +75,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -84,7 +84,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -106,7 +106,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ 
-133,7 +133,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -204,7 +204,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -234,7 +234,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -275,7 +275,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @e2e (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e (alertforward/g0)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 0bed8c05f..19670ef5e 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @e2e @post-release (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 95e0bcbe3..2ca6bdbec 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release(config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 +71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release(config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release(config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release(config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop 
@e2e @post-release (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @post-upgrade @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 32d378009..1e70ea960 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err 
= utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 57a983932..71b235be2 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { err = utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release @pre-upgrade (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index a6133b011..50b5af49a 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @e2e @post-release (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @post-upgrade @e2e @post-release (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any 
manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @e2e @post-release (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 2fb95df3b..9164a312f 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @e2e @post-release (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index fa55da410..861edd306 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @e2e @post-release (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git 
a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index db51c1e88..a6d6a5a32 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @e2e @post-release (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @post-upgrade @e2e @post-release (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 0ea7f6ebe..2c0ec3092 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { 
By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). @@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @e2e @post-release @pre-upgrade (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 996c08e4f..8a3ba15b7 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @e2e @post-release (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @post-upgrade @e2e @post-release (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index d21abd4f8..60294ee89 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -42,7 +42,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -105,7 +105,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for 
Observability components [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { + It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { By("Checking node selector spec in MCO CR") mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { + It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { By("Checking podAntiAffinity for all pods") Eventually(func() error { err := utils.CheckAllPodsAffinity(testOptions) @@ -137,7 +137,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { + It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -160,7 +160,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e @post-release (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index da1163bb7..600e25d33 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -63,7 +63,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { By("--delete-delay=" + deleteDelay) Eventually(func() error { compacts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -82,7 +82,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args 
[P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { By("--ignore-deletion-marks-delay=" + ignoreDeletionMarksDelay) Eventually(func() error { stores, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -104,7 +104,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { receives, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { By("--tsdb.block-duration=" + blockDuration) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 26f68299d..3c6931c13 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop @post-upgrade Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain + query From be1f036b56900a0a50f5db01720d584213dbcee4 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 30 Aug 2023 17:28:13 +0800 Subject: [PATCH 081/150] add tag for @post-restore Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 12 ++++++------ tests/pkg/tests/observability_alert_test.go | 18 +++++++++--------- .../pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 10 +++++----- .../pkg/tests/observability_dashboard_test.go | 6 +++--- .../pkg/tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- .../tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 ++++++------ ...bservability_observatorium_preserve_test.go | 2 +- .../pkg/tests/observability_reconcile_test.go | 10 +++++----- .../pkg/tests/observability_retention_test.go | 10 +++++----- tests/pkg/tests/observability_route_test.go | 2 +- 15 files changed, 52 insertions(+), 52 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 951f71cf2..ccd0f8f6f 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @post-upgrade @e2e @pre-upgrade (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @post-upgrade @post-restore @e2e @pre-upgrade (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -118,7 +118,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @e2e @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @post-restore @e2e @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -133,7 +133,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, 
true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) @@ -172,7 +172,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade (deploy/g1)", func() { + It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore (deploy/g1)", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) @@ -186,7 +186,7 @@ var _ = Describe("", func() { } }) - Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop @post-upgrade (addon/g1) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop @post-upgrade @post-restore (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index cc0876f6b..68937d6cb 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -50,7 +50,7 @@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). 
@@ -75,7 +75,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -84,7 +84,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -97,7 +97,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -106,7 +106,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -133,7 +133,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release 
(alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -204,7 +204,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -234,7 +234,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -275,7 +275,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e (alertforward/g0)", func() { amURL := url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 19670ef5e..0751eedc6 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 2ca6bdbec..53d84b000 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release(config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 +71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release(config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release(config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release(config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) }) - 
It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @post-upgrade @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 1e70ea960..6bf49ba13 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ -68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana 
dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 71b235be2..0b251c9d6 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -28,7 +28,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -54,7 +54,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check endpoint-operator and metrics-collector pods are ready") Eventually(func() error { err = utils.CheckAllOBAsEnabledLocal(testOptions) @@ -68,7 +68,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index 50b5af49a..03f706e61 100644 --- a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @post-upgrade @e2e @post-release (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - 
It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index 9164a312f..d63c9c9ed 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release (grafana_dev/g0)", func() { cmd := exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 861edd306..25983f690 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - 
[P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @e2e @post-release (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index a6d6a5a32..c3a777e1e 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @post-upgrade @e2e @post-release (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @post-upgrade @post-restore @e2e @post-release (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 2c0ec3092..785d9f817 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear 
on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). @@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @e2e @post-release @pre-upgrade (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 8a3ba15b7..357d09c26 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @post-upgrade @e2e @post-release (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @post-upgrade @post-restore @e2e @post-release (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" diff --git a/tests/pkg/tests/observability_reconcile_test.go b/tests/pkg/tests/observability_reconcile_test.go index 60294ee89..3360d5579 100644 --- a/tests/pkg/tests/observability_reconcile_test.go +++ b/tests/pkg/tests/observability_reconcile_test.go @@ -42,7 +42,7 @@ var _ = Describe("", func() { 
testOptions.HubCluster.KubeContext) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - tune retention settings in MCO CR [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -105,7 +105,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { + It("RHACM4K-1655: Observability: Verify nodeSelector setting effects for Observability components [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (reconcile/g0)", func() { By("Checking node selector spec in MCO CR") mcoSC, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { + It("RHACM4K-1657: Observability: Check affinity rule takes effect on Observability components [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (reconcile/g0)", func() { By("Checking podAntiAffinity for all pods") Eventually(func() error { err := utils.CheckAllPodsAffinity(testOptions) @@ -137,7 +137,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { + It("RHACM4K-2821: Observability: Customize the Observability components storage size [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" @@ -160,7 +160,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (reconcile/g0)", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Revert MCO CR changes [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (reconcile/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "vmware" substring2 := "ibm" diff --git a/tests/pkg/tests/observability_retention_test.go b/tests/pkg/tests/observability_retention_test.go index 600e25d33..66b536646 100644 --- a/tests/pkg/tests/observability_retention_test.go +++ b/tests/pkg/tests/observability_retention_test.go @@ -63,7 +63,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-2881: Observability: Check and tune backup retention 
settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check compact args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (retention/g0):", func() { By("--delete-delay=" + deleteDelay) Eventually(func() error { compacts, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -82,7 +82,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check store args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (retention/g0):", func() { By("--ignore-deletion-marks-delay=" + ignoreDeletionMarksDelay) Eventually(func() error { stores, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -104,7 +104,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check receive args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { receives, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -126,7 +126,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (retention/g0):", func() { By("--tsdb.retention=" + retentionInLocal) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -148,7 +148,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade (retention/g0):", func() { + It("RHACM4K-2881: Observability: Check and tune backup retention settings in MCO CR - Check rule args [P2][Sev2][Observability][Stable] @e2e @post-release @post-upgrade @post-restore (retention/g0):", func() { By("--tsdb.block-duration=" + blockDuration) Eventually(func() error { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 3c6931c13..6217e53b5 100644 --- 
a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop @post-upgrade Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop @post-upgrade @post-restore Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" url := "https://rbac-query-proxy-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query From 0baf78fc0aacf7e86622273cc96767b024e43c30 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 31 Aug 2023 10:58:37 +0800 Subject: [PATCH 082/150] remove result file before running automation Signed-off-by: Chang Liang Qu --- Jenkinsfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Jenkinsfile b/Jenkinsfile index a3f51ecad..7fe7f8f79 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -87,6 +87,7 @@ pipeline { set -x export KUBECONFIG=~/.kube/config go mod vendor && ginkgo build ./tests/pkg/tests/ + rm -rf tests/pkg/tests/*.xml cd tests cp resources/options.yaml.template resources/options.yaml /usr/local/bin/yq e -i '.options.hub.name="'"\$HUB_CLUSTER_NAME"'"' resources/options.yaml From ad3e3d20142096c386f628ebf5eab90a704af587 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 31 Aug 2023 11:15:17 +0800 Subject: [PATCH 083/150] update ci script for tagging Signed-off-by: Chang Liang Qu --- execute_obs_interop_commands.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/execute_obs_interop_commands.sh b/execute_obs_interop_commands.sh index 04989512c..2042cb4f5 100644 --- a/execute_obs_interop_commands.sh +++ b/execute_obs_interop_commands.sh @@ -18,6 +18,7 @@ export REGION=${REGION:-'us-east-1'} export USE_MINIO=${USE_MINIO:-'false'} export SKIP_INSTALL_STEP=${SKIP_INSTALL_STEP:-'false'} export SKIP_UNINSTALL_STEP=${SKIP_UNINSTALL_STEP:-'true'} +export TAGGING=${TAGGING:-} if [[ -n ${PARAM_AWS_ACCESS_KEY_ID} ]]; then export AWS_ACCESS_KEY_ID=${PARAM_AWS_ACCESS_KEY_ID} @@ -63,5 +64,5 @@ else /usr/local/bin/yq e -i '.options.clusters.baseDomain="'"$MANAGED_CLUSTER_BASE_DOMAIN"'"' resources/options.yaml /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"$MAKUBECONFIG"'"' resources/options.yaml cat resources/options.yaml - ginkgo -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + ginkgo --focus=$TAGGING -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 fi From 557ababb6b570a295e9d5e6b15271b94deb96173 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 4 Sep 2023 16:54:47 +0800 Subject: [PATCH 084/150] skip case 1443 for ocpInterop Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_observatorium_preserve_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_observatorium_preserve_test.go b/tests/pkg/tests/observability_observatorium_preserve_test.go index 357d09c26..9472789f9 100644 --- a/tests/pkg/tests/observability_observatorium_preserve_test.go +++ b/tests/pkg/tests/observability_observatorium_preserve_test.go @@ -26,7 +26,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - 
Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@ocpInterop @post-upgrade @post-restore @e2e @post-release (observatorium_preserve/g0) -", func() { + Context("RHACM4K-1443: Observability: Verify Observatorium CR configuration compliance [P1][Sev1][Observability]@post-upgrade @post-restore @e2e @post-release (observatorium_preserve/g0) -", func() { It("[Stable] Updating observatorium cr (spec.thanos.compact.retentionResolution1h) should be automatically reverted", func() { oldCRResourceVersion := "" updateRetention := "10d" From 26299d945c05c4c6cedb66e47b6dff5859f1b0ec Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 14 Sep 2023 16:59:04 +0800 Subject: [PATCH 085/150] update ManagedClusterAddonsStatusMessage to distinguish ManagedClusterAddOnEnabledMessage Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_oba.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/pkg/utils/mco_oba.go b/tests/pkg/utils/mco_oba.go index 45f5887d6..5c035badf 100644 --- a/tests/pkg/utils/mco_oba.go +++ b/tests/pkg/utils/mco_oba.go @@ -13,6 +13,7 @@ import ( const ( ManagedClusterAddOnDisabledMessage = "enableMetrics is set to False" ManagedClusterAddOnEnabledMessage = "Cluster metrics sent successfully" + ManagedClusterAddonsStatusMessage = "observability-controller add-on is available" ) func CheckOBAStatus(opt TestOptions, namespace, status string) error { @@ -81,7 +82,7 @@ func CheckAllOBAsEnabled(opt TestOptions) error { } klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) - err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddonsStatusMessage) if err != nil { return err } @@ -105,7 +106,7 @@ func CheckAllOBAsEnabledLocal(opt TestOptions) error { } klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) - err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddonsStatusMessage) if err != nil { return err } From 878f09fd614cc2576e53c4a9512b61d4f4b528b7 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 22 Sep 2023 14:55:41 +0800 Subject: [PATCH 086/150] update collection interval to 300 Signed-off-by: Chang Liang Qu --- examples/export/v1beta2/custom-certs/observability.yaml | 2 +- examples/export/v1beta2/observability.yaml | 2 +- .../updatemcocr/v1beta2-observability-maxitemsize.yaml | 2 +- examples/mco/e2e/v1beta2/custom-certs/observability.yaml | 2 +- examples/mco/e2e/v1beta2/observability.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml index da76fef20..a350fe4d0 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -104,7 +104,7 @@ spec: kubernetes.io/os: linux observabilityAddonSpec: enableMetrics: true - interval: 30 + interval: 300 resources: limits: cpu: 200m diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index 7eec99531..4ef55eb2c 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -104,7 +104,7 @@ spec: kubernetes.io/os: linux observabilityAddonSpec: enableMetrics: true - interval: 30 + interval: 300 resources: 
limits: cpu: 200m diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 7f6e636ec..3f44d0143 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -104,7 +104,7 @@ spec: kubernetes.io/os: linux observabilityAddonSpec: enableMetrics: true - interval: 30 + interval: 300 resources: limits: cpu: 200m diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index a1f1f3951..9d24e9166 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -104,7 +104,7 @@ spec: kubernetes.io/os: linux observabilityAddonSpec: enableMetrics: true - interval: 30 + interval: 300 resources: limits: cpu: 200m diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 13588d5b4..9a301048b 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -104,7 +104,7 @@ spec: kubernetes.io/os: linux observabilityAddonSpec: enableMetrics: true - interval: 30 + interval: 300 resources: limits: cpu: 200m From 14997040a52a61cf63d6d55a741f4b13921d2346 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 26 Sep 2023 11:11:26 +0800 Subject: [PATCH 087/150] update case interval to 300 to cordinate deployment interval update Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 53d84b000..a1cb49bd9 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -123,7 +123,7 @@ var _ = Describe("", func() { } observabilityAddonSpec := mcoRes.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{}) Expect(observabilityAddonSpec["enableMetrics"]).To(Equal(true)) - Expect(observabilityAddonSpec["interval"]).To(Equal(int64(30))) + Expect(observabilityAddonSpec["interval"]).To(Equal(int64(300))) }) It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (config/g0)", func() { From 9c420c67fa8dcbf2f5d6a19507b89d83794d26d3 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 26 Oct 2023 14:11:01 +0800 Subject: [PATCH 088/150] correct minio true setting Signed-off-by: Chang Liang Qu --- Jenkinsfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7fe7f8f79..3443bcb96 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -58,6 +58,8 @@ pipeline { export SKIP_INSTALL_STEP="${params.SKIP_INSTALL_STEP}" export SKIP_UNINSTALL_STEP="${params.SKIP_UNINSTALL_STEP}" export TAGGING="${params.TAGGING}" + export USE_MINIO="${params.USE_MINIO}" + export IS_CANARY_ENV=true if [[ -n "${params.AWS_ACCESS_KEY_ID}" ]]; then export AWS_ACCESS_KEY_ID="${params.AWS_ACCESS_KEY_ID}" @@ -67,8 +69,8 @@ pipeline { export AWS_SECRET_ACCESS_KEY="${params.AWS_SECRET_ACCESS_KEY}" fi - if [[ "${!params.USE_MINIO}" == false ]]; then - export IS_CANARY_ENV=true + if [[ "${!params.USE_MINIO}" == true ]]; then + export IS_CANARY_ENV=false fi if [[ -z "${HUB_CLUSTER_NAME}" || -z 
"${BASE_DOMAIN}" || -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then From 2409409bcf09bc9135e51081b3945073d6e2a5ad Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 26 Oct 2023 14:35:39 +0800 Subject: [PATCH 089/150] correct minio option setting Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 3443bcb96..9d8eeb190 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -69,7 +69,7 @@ pipeline { export AWS_SECRET_ACCESS_KEY="${params.AWS_SECRET_ACCESS_KEY}" fi - if [[ "${!params.USE_MINIO}" == true ]]; then + if [[ "${params.USE_MINIO}" == true ]]; then export IS_CANARY_ENV=false fi From 777919cfe0bd9c104b68f2b83e395fbce988ab31 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 1 Nov 2023 15:35:09 +0800 Subject: [PATCH 090/150] ignore csv_abormal Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index 67bf12350..27f7fca75 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -77,3 +77,4 @@ cluster:policy_governance_info:propagated_noncompliant_count policy:policy_governance_info:propagated_count policy:policy_governance_info:propagated_noncompliant_count namespace_cpu:kube_pod_container_resource_requests:sum +csv_abnormal From de9259c7d96756afa44d4eab122999a5f77c0dba Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Nov 2023 09:57:29 +0800 Subject: [PATCH 091/150] increase receive, rule, and alertmanager replica Signed-off-by: Chang Liang Qu --- examples/export/v1beta2/observability.yaml | 6 +++--- .../updatemcocr/v1beta2-observability-maxitemsize.yaml | 6 +++--- examples/mco/e2e/v1beta2/custom-certs/observability.yaml | 6 +++--- examples/mco/e2e/v1beta2/observability.yaml | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index 4ef55eb2c..8ba78989c 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -45,7 +45,7 @@ spec: limits: cpu: 1 memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: @@ -53,7 +53,7 @@ spec: limits: cpu: 1 memory: 1Gi - replicas: 1 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' store: @@ -89,7 +89,7 @@ spec: cpu: 1 memory: 1Gi alertmanager: - replicas: 2 + replicas: 3 resources: limits: cpu: 100m diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 3f44d0143..39d83cc63 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -45,7 +45,7 @@ spec: limits: cpu: 1 memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: @@ -53,7 +53,7 @@ spec: limits: cpu: 1 memory: 1Gi - replicas: 1 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' store: @@ -89,7 +89,7 @@ spec: cpu: 1 memory: 1Gi alertmanager: - replicas: 2 + replicas: 3 resources: limits: cpu: 100m diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index 9d24e9166..033dc3e12 100644 
--- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -45,7 +45,7 @@ spec: limits: cpu: 1 memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: @@ -53,7 +53,7 @@ spec: limits: cpu: 1 memory: 1Gi - replicas: 1 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' store: @@ -89,7 +89,7 @@ spec: cpu: 1 memory: 1Gi alertmanager: - replicas: 2 + replicas: 3 resources: limits: cpu: 100m diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 9a301048b..3e469424f 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -45,7 +45,7 @@ spec: limits: cpu: 1 memory: 4Gi - replicas: 2 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' rule: @@ -53,7 +53,7 @@ spec: limits: cpu: 1 memory: 1Gi - replicas: 1 + replicas: 3 serviceAccountAnnotations: test.com/role-arn: 's3_role' store: @@ -89,7 +89,7 @@ spec: cpu: 1 memory: 1Gi alertmanager: - replicas: 2 + replicas: 3 resources: limits: cpu: 100m From 1393e781fc7da925c642c0513e9b7bb2bb81d8a7 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Nov 2023 10:49:38 +0800 Subject: [PATCH 092/150] replace by new bucket on jenkins Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9d8eeb190..7ba376fb3 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -20,7 +20,7 @@ pipeline { string(name:'MANAGED_CLUSTER_USER', defaultValue: 'kubeadmin', description: 'Managed Cluster User Name') string(name:'MANAGED_CLUSTER_PASS', defaultValue: '', description: 'Managed cluster Password') string(name:'MANAGED_CLUSTER_API_URL', defaultValue: '', description: 'Managed cluster API URL') - string(name:'BUCKET', defaultValue: 'obs-v1', description: 'Bucket name') + string(name:'BUCKET', defaultValue: 'obs-auto-bucket', description: 'Bucket name') string(name:'REGION', defaultValue: 'us-east-1', description: 'Bucket region') password(name:'AWS_ACCESS_KEY_ID', defaultValue: '', description: 'AWS access key ID') password(name:'AWS_SECRET_ACCESS_KEY', defaultValue: '', description: 'AWS secret access key') From cc1e7c35707509036218a53ab108e42b94ecfed5 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 3 Nov 2023 14:20:44 +0800 Subject: [PATCH 093/150] remove unsupported cleanupInterval Signed-off-by: Chang Liang Qu --- examples/export/v1beta2/custom-certs/observability.yaml | 1 - examples/export/v1beta2/observability.yaml | 1 - .../updatemcocr/v1beta2-observability-maxitemsize.yaml | 1 - examples/mco/e2e/v1beta2/custom-certs/observability.yaml | 1 - examples/mco/e2e/v1beta2/observability.yaml | 1 - 5 files changed, 5 deletions(-) diff --git a/examples/export/v1beta2/custom-certs/observability.yaml b/examples/export/v1beta2/custom-certs/observability.yaml index a350fe4d0..b46ded2fb 100644 --- a/examples/export/v1beta2/custom-certs/observability.yaml +++ b/examples/export/v1beta2/custom-certs/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/export/v1beta2/observability.yaml b/examples/export/v1beta2/observability.yaml index 8ba78989c..82bf3ef0e 100644 --- a/examples/export/v1beta2/observability.yaml +++ b/examples/export/v1beta2/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: 
retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml index 39d83cc63..e2f31f6ad 100644 --- a/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml +++ b/examples/maxitemsize/updatemcocr/v1beta2-observability-maxitemsize.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml index 033dc3e12..3ec9c6bc5 100644 --- a/examples/mco/e2e/v1beta2/custom-certs/observability.yaml +++ b/examples/mco/e2e/v1beta2/custom-certs/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d diff --git a/examples/mco/e2e/v1beta2/observability.yaml b/examples/mco/e2e/v1beta2/observability.yaml index 3e469424f..3bf03e8fa 100644 --- a/examples/mco/e2e/v1beta2/observability.yaml +++ b/examples/mco/e2e/v1beta2/observability.yaml @@ -7,7 +7,6 @@ spec: advanced: retentionConfig: blockDuration: 3h - cleanupInterval: 6m deleteDelay: 50h retentionInLocal: 5d retentionResolution1h: 31d From 056e0193d8862b32b31790368a7874704d1c4362 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 17 Jan 2024 09:57:50 +0800 Subject: [PATCH 094/150] case-39481 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 35 ++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 68937d6cb..807a82c4b 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -13,6 +13,7 @@ import ( "net/http" "net/url" "os" + "os/exec" "reflect" "sort" "strings" @@ -50,6 +51,30 @@ var _ = Describe("", func() { } secret := "alertmanager-config" + It("RHACM4K-39481: Observability: Verify PrometheusRule resource(2.9) [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g1)", func() { + By("Checking if PrometheusRule: acm-observability-alert-rules is existed") + + command := "oc" + args := []string{"get", "prometheusrules.monitoring.coreos.com", "acm-observability-alert-rules", "-n", "open-cluster-management-observability"} + + output, err := exec.Command(command, args...).CombinedOutput() + if err != nil { + fmt.Printf("Error executing command: %v\n", err) + fmt.Printf("Command output:\n%s\n", output) + return + } + + prometheusRule := "acm-observability-alert-rules" + if strings.Contains(string(output), prometheusRule) { + fmt.Println("Expected result found.") + } else { + fmt.Println("Expected result not found.") + } + + fmt.Printf("Command output:\n%s\n", output) + + }) + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { @@ -308,10 +333,13 @@ var _ = Describe("", func() { } expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, 
"4.8.0") + klog.V(3).Infof("expectedOCPClusterIDs is %s", expectedOCPClusterIDs) Expect(err).NotTo(HaveOccurred()) expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions) + klog.V(3).Infof("expectedKSClusterNames is %s", expectedKSClusterNames) Expect(err).NotTo(HaveOccurred()) expectClusterIdentifiers := append(expectedOCPClusterIDs, expectedKSClusterNames...) + klog.V(3).Infof("expectClusterIdentifiers is %s", expectClusterIdentifiers) // install watchdog PrometheusRule to *KS clusters watchDogRuleKustomizationPath := "../../../examples/alerts/watchdog_rule" @@ -368,14 +396,19 @@ var _ = Describe("", func() { } sort.Strings(clusterIDsInAlerts) + klog.V(3).Infof("clusterIDsInAlerts is %s", clusterIDsInAlerts) sort.Strings(expectClusterIdentifiers) + klog.V(3).Infof("sort.Strings.expectClusterIdentifiers is %s", expectClusterIdentifiers) + klog.V(3).Infof("no sort.Strings.expectedOCPClusterIDs is %s", expectedOCPClusterIDs) sort.Strings(expectedOCPClusterIDs) + klog.V(3).Infof("sort.Strings.expectedOCPClusterIDs is %s", expectedOCPClusterIDs) if !reflect.DeepEqual(clusterIDsInAlerts, expectClusterIdentifiers) && !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) { + //if !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) { return fmt.Errorf("Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster") } return nil - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(Succeed()) }) It("RHACM4K-22427: Observability: Disable the managedcluster's alerts forward to the Hub [P2][Sev2][Observability][Integration] @e2e (alertforward/g1)", func() { From f19a3d0cf06737fae1ad6826b4c788f12762b987 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 22 Jan 2024 18:15:18 +0800 Subject: [PATCH 095/150] update jenkins to get cluster name Signed-off-by: Chang Liang Qu --- Jenkinsfile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 7ba376fb3..ca8ac6e8c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,6 +44,12 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" + BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:6443' '{print \$2}') + HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' 
-f1) + echo "BASE_DOMAIN: \$BASE_DOMAIN" + echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" + export HUB_CLUSTER_NAME="\$HUB_CLUSTER_NAME" + export BASE_DOMAIN="\$BASE_DOMAIN" export HUB_CLUSTER_NAME="${params.HUB_CLUSTER_NAME}" export BASE_DOMAIN="${params.BASE_DOMAIN}" export MANAGED_CLUSTER_NAME="${params.MANAGED_CLUSTER_NAME}" From 3fcf992a88803030369a5f47a5ae3c90c3ad8084 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 22 Jan 2024 18:17:28 +0800 Subject: [PATCH 096/150] remove previous export commands Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index ca8ac6e8c..c9f8b6af9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -50,8 +50,6 @@ pipeline { echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" export HUB_CLUSTER_NAME="\$HUB_CLUSTER_NAME" export BASE_DOMAIN="\$BASE_DOMAIN" - export HUB_CLUSTER_NAME="${params.HUB_CLUSTER_NAME}" - export BASE_DOMAIN="${params.BASE_DOMAIN}" export MANAGED_CLUSTER_NAME="${params.MANAGED_CLUSTER_NAME}" export MANAGED_CLUSTER_BASE_DOMAIN="${params.MANAGED_CLUSTER_BASE_DOMAIN}" export MANAGED_CLUSTER_USER="${params.MANAGED_CLUSTER_USER}" From ac6fe574c373e9ab810486fcda5cde7cb4df7889 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 22 Jan 2024 20:29:17 +0800 Subject: [PATCH 097/150] add echo for debug Signed-off-by: Chang Liang Qu --- Jenkinsfile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index c9f8b6af9..439c66518 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -76,7 +76,13 @@ pipeline { if [[ "${params.USE_MINIO}" == true ]]; then export IS_CANARY_ENV=false fi - + echo "BASE_DOMAIN: \$BASE_DOMAIN" + echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" + echo "${HUB_CLUSTER_NAME}" + echo "${BASE_DOMAIN}" + echo "${OC_CLUSTER_USER}" + echo "${OC_HUB_CLUSTER_PASS}" + echo "${OC_HUB_CLUSTER_API_URL}" if [[ -z "${HUB_CLUSTER_NAME}" || -z "${BASE_DOMAIN}" || -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then echo "Aborting test.. OCP HUB details are required for the test execution" exit 1 From a0cd20314bf671aa98bf7fea0fce87040fb8fc32 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 22 Jan 2024 20:32:24 +0800 Subject: [PATCH 098/150] remove previous hub name and base domain variables Signed-off-by: Chang Liang Qu --- Jenkinsfile | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 439c66518..131918022 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -75,15 +75,8 @@ pipeline { if [[ "${params.USE_MINIO}" == true ]]; then export IS_CANARY_ENV=false - fi - echo "BASE_DOMAIN: \$BASE_DOMAIN" - echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" - echo "${HUB_CLUSTER_NAME}" - echo "${BASE_DOMAIN}" - echo "${OC_CLUSTER_USER}" - echo "${OC_HUB_CLUSTER_PASS}" - echo "${OC_HUB_CLUSTER_API_URL}" - if [[ -z "${HUB_CLUSTER_NAME}" || -z "${BASE_DOMAIN}" || -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then + fi + if [[ -z "${OC_CLUSTER_USER}" || -z "${OC_HUB_CLUSTER_PASS}" || -z "${OC_HUB_CLUSTER_API_URL}" ]]; then echo "Aborting test.. 
OCP HUB details are required for the test execution" exit 1 else From d82a3b69c514f80b6bed3a7b3bc1e095bfcd1341 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 09:01:31 +0800 Subject: [PATCH 099/150] not hardcode port 6443 Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 131918022..591d6283d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:6443' '{print \$2}') + BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:' '{print \$2}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 7af82f54db80f3a01784f1796205b84e279fa058 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 09:16:06 +0800 Subject: [PATCH 100/150] remove warning Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 591d6283d..bf900a00a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:' '{print \$2}') + BASE_DOMAIN=$(echo $OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:' '{print $2}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 2079e7ebaee4fed4581203c01e6f93543284fcea Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 09:57:10 +0800 Subject: [PATCH 101/150] fix format issue Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index bf900a00a..39491e359 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -46,7 +46,7 @@ pipeline { export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" BASE_DOMAIN=$(echo $OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:' '{print $2}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) - echo "BASE_DOMAIN: \$BASE_DOMAIN" + echo "BASE_DOMAIN: $BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" export HUB_CLUSTER_NAME="\$HUB_CLUSTER_NAME" export BASE_DOMAIN="\$BASE_DOMAIN" From 85086f307fc5344fbbd198ab7259c591692548f7 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:14:27 +0800 Subject: [PATCH 102/150] original PR file Signed-off-by: Chang Liang Qu --- Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 39491e359..131918022 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,9 +44,9 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=$(echo $OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:' '{print $2}') + BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:6443' '{print \$2}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' 
-f1) - echo "BASE_DOMAIN: $BASE_DOMAIN" + echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" export HUB_CLUSTER_NAME="\$HUB_CLUSTER_NAME" export BASE_DOMAIN="\$BASE_DOMAIN" From 7969497d70418b93b864d36629b2bbceb298cef8 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:25:46 +0800 Subject: [PATCH 103/150] not hardcode 6443 with new code change Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 131918022..6f582d6c1 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\.|:6443' '{print \$2}') + BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'api\\.|:' '{print \$2}' | tr -d '[:space:]') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 0218f913b33b4fc6805f42af5393d128c09404ed Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:29:01 +0800 Subject: [PATCH 104/150] correct warning for \\ Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6f582d6c1..caca92278 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'api\\.|:' '{print \$2}' | tr -d '[:space:]') + BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'api\\\\.|:' '{print \$2}' | tr -d '[:space:]') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 12f2727e4a142cd09c910166cd07a1ba1cde666b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:31:49 +0800 Subject: [PATCH 105/150] warning is gone, but not get the value, try to fix it Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index caca92278..ed7117c24 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'api\\\\.|:' '{print \$2}' | tr -d '[:space:]') + BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | sed -n 's|^https://api\.\(.*\):.*|\1|p') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' 
-f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 71b1c849a73f89bad9d681b4c612c00c16f6c502 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:35:31 +0800 Subject: [PATCH 106/150] trying new value Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index ed7117c24..8202f8b1a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | sed -n 's|^https://api\.\(.*\):.*|\1|p') + BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\\\.|:' '{print \$2}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 86715e6050823e7d5e1e2124609454b6332c5a07 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:39:54 +0800 Subject: [PATCH 107/150] fixing by another way Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 8202f8b1a..939fe82e8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \$OC_HUB_CLUSTER_API_URL | awk -F'api\\\\.|:' '{print \$2}') + BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | cut -d'.' -f2 | cut -d':' -f1) HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 9a433ad60dbc5a520041525adb60b8c1745c0b14 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:43:20 +0800 Subject: [PATCH 108/150] continue fix Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 939fe82e8..2e22e733f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | cut -d'.' -f2 | cut -d':' -f1) + BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'api\\.|:' '{print \$2}' | awk -F'/' '{print \$1}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' -f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From 6911a08ee37f5fc09e9f5cdde03961fd4d6fc91a Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 25 Jan 2024 10:45:31 +0800 Subject: [PATCH 109/150] continue fix 2 Signed-off-by: Chang Liang Qu --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 2e22e733f..f9d2eb31d 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -44,7 +44,7 @@ pipeline { export OC_HUB_CLUSTER_PASS="${params.OC_HUB_CLUSTER_PASS}" set -x export OC_HUB_CLUSTER_API_URL="${params.OC_HUB_CLUSTER_API_URL}" - BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'api\\.|:' '{print \$2}' | awk -F'/' '{print \$1}') + BASE_DOMAIN=\$(echo \${OC_HUB_CLUSTER_API_URL} | awk -F'https://api\\.|:' '{print \$2}') HUB_CLUSTER_NAME=\$(echo \$BASE_DOMAIN | cut -d'.' 
-f1) echo "BASE_DOMAIN: \$BASE_DOMAIN" echo "HUB_CLUSTER_NAME: \$HUB_CLUSTER_NAME" From fa236741a93ea77ae0e34cfdc1f26ee66c490485 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 31 Jan 2024 18:03:49 +0800 Subject: [PATCH 110/150] update port for rosa 443 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability-e2e-test_suite_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index 93f7f4f0a..43361afb3 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -223,7 +223,8 @@ func initVars() { } else { Expect(baseDomain).NotTo(BeEmpty(), "The `baseDomain` is required.") testOptions.HubCluster.BaseDomain = baseDomain - testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", baseDomain) + // testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", baseDomain) + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:443", baseDomain) } if testOptions.HubCluster.User != "" { From 4631e471889e4fba02a2040a8a5e54b84391f38b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 31 Jan 2024 18:29:13 +0800 Subject: [PATCH 111/150] update api port to 443 Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability-e2e-test_suite_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index 43361afb3..0904cb608 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -216,7 +216,7 @@ func initVars() { if testOptions.HubCluster.ClusterServerURL == "" { testOptions.HubCluster.ClusterServerURL = fmt.Sprintf( - "https://api.%s:6443", + "https://api.%s:443", testOptions.HubCluster.BaseDomain, ) } From e728c621a15e11e0608c640e9df944fc795ca405 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 5 Feb 2024 08:49:20 +0800 Subject: [PATCH 112/150] add condition judgment for rosa hcp Signed-off-by: Chang Liang Qu --- .../observability-e2e-test_suite_test.go | 28 ++++-- tests/pkg/utils/mco_grafana.go | 89 +++++++++++++++++-- 2 files changed, 106 insertions(+), 11 deletions(-) diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index 0904cb608..262e12d9d 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -9,6 +9,7 @@ import ( "io/ioutil" "math/rand" "os" + "strings" "testing" "time" @@ -211,20 +212,35 @@ func initVars() { testOptions.KubeConfig = kubeconfig } + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring1 := "rosa" + substring2 := "hcp" if testOptions.HubCluster.BaseDomain != "" { baseDomain = testOptions.HubCluster.BaseDomain - if testOptions.HubCluster.ClusterServerURL == "" { - testOptions.HubCluster.ClusterServerURL = fmt.Sprintf( - "https://api.%s:443", - testOptions.HubCluster.BaseDomain, - ) + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf( + "https://api.%s:443", + testOptions.HubCluster.BaseDomain, + ) + } else { + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf( + "https://api.%s:6443", + testOptions.HubCluster.BaseDomain, + ) + } } } else { 
Expect(baseDomain).NotTo(BeEmpty(), "The `baseDomain` is required.") testOptions.HubCluster.BaseDomain = baseDomain // testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", baseDomain) - testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:443", baseDomain) + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:443", baseDomain) + } else { + testOptions.HubCluster.ClusterServerURL = fmt.Sprintf("https://api.%s:6443", baseDomain) + } } if testOptions.HubCluster.User != "" { diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index 3dd550c0f..5c38f593e 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -3,12 +3,91 @@ package utils +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "time" + + "gopkg.in/yaml.v2" + "k8s.io/klog" +) + +var ( + testHeadless bool + + BearerToken string + baseDomain string + kubeadminUser string + kubeadminCredential string + kubeconfig string + reportFile string + optionsFile string + ownerPrefix, ocpRelease string + + testOptions TestOptions + testOptionsContainer TestOptionsContainer + testUITimeout time.Duration + + testFailed = false +) + func GetGrafanaURL(opt TestOptions) string { - grafanaConsoleURL := "https://grafana-open-cluster-management-observability.apps." + opt.HubCluster.BaseDomain - if opt.HubCluster.GrafanaURL != "" { - grafanaConsoleURL = opt.HubCluster.GrafanaURL + if optionsFile == "" { + optionsFile = os.Getenv("OPTIONS") + if optionsFile == "" { + optionsFile = "resources/options.yaml" + } + } + + klog.V(1).Infof("options filename=%s", optionsFile) + + data, err := ioutil.ReadFile(optionsFile) + if err != nil { + klog.Errorf("--options error: %v", err) + } + + fmt.Printf("file preview: %s \n", string(optionsFile)) + + err = yaml.Unmarshal([]byte(data), &testOptionsContainer) + if err != nil { + klog.Errorf("--options error: %v", err) + } + + testOptions = testOptionsContainer.Options + + // default Headless is `true` + // to disable, set Headless: false + // in options file + if testOptions.Headless == "" { + testHeadless = true + } else { + if testOptions.Headless == "false" { + testHeadless = false + } else { + testHeadless = true + } + } + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring1 := "rosa" + substring2 := "hcp" + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + + grafanaConsoleURL := "https://grafana-open-cluster-management-observability.apps.rosa." + opt.HubCluster.BaseDomain + if opt.HubCluster.GrafanaURL != "" { + grafanaConsoleURL = opt.HubCluster.GrafanaURL + } else { + opt.HubCluster.GrafanaHost = "grafana-open-cluster-management-observability.apps.rosa." + opt.HubCluster.BaseDomain + } + return grafanaConsoleURL } else { - opt.HubCluster.GrafanaHost = "grafana-open-cluster-management-observability.apps." + opt.HubCluster.BaseDomain + grafanaConsoleURL := "https://grafana-open-cluster-management-observability.apps." + opt.HubCluster.BaseDomain + if opt.HubCluster.GrafanaURL != "" { + grafanaConsoleURL = opt.HubCluster.GrafanaURL + } else { + opt.HubCluster.GrafanaHost = "grafana-open-cluster-management-observability.apps." 
+ opt.HubCluster.BaseDomain + } + return grafanaConsoleURL } - return grafanaConsoleURL } From 0be8e8280d1419cf65716a2eb284e7f5cc5e68bc Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 6 Mar 2024 14:50:29 +0800 Subject: [PATCH 113/150] update case for 8509 to validate install and namespace changes Signed-off-by: Chang Liang Qu --- tests/benchmark/setup-metrics-collector.sh | 12 +++++----- .../observability-e2e-test_suite_test.go | 2 +- tests/pkg/tests/observability_install_test.go | 24 +++++++++---------- tests/pkg/utils/mco_deploy.go | 2 +- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/tests/benchmark/setup-metrics-collector.sh b/tests/benchmark/setup-metrics-collector.sh index 5fcae62dd..d1d8491a4 100755 --- a/tests/benchmark/setup-metrics-collector.sh +++ b/tests/benchmark/setup-metrics-collector.sh @@ -41,19 +41,19 @@ do kubectl create ns ${cluster_name} # create ca/sa/rolebinding for metrics collector - kubectl get configmap metrics-collector-serving-certs-ca-bundle -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl get secret observability-managed-cluster-certs -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - - kubectl get sa endpoint-observability-operator-sa -n open-cluster-management-addon-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get configmap metrics-collector-serving-certs-ca-bundle -n open-cluster-management-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get secret observability-controller-open-cluster-management.io-observability-signer-client-cert -n open-cluster-management-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get secret observability-managed-cluster-certs -n open-cluster-management-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - + kubectl get sa endpoint-observability-operator-sa -n open-cluster-management-observability -o json | jq 'del(.metadata.namespace,.metadata.resourceVersion,.metadata.uid) | .metadata.creationTimestamp=null' | kubectl apply -n ${cluster_name} -f - kubectl -n ${cluster_name} patch secret observability-managed-cluster-certs --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' kubectl -n ${cluster_name} patch sa endpoint-observability-operator-sa --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' # deploy metrics collector deployment to cluster ns deploy_yaml_file=${cluster_name}-metrics-collector-deployment.yaml - kubectl 
get deploy metrics-collector-deployment -n open-cluster-management-addon-observability -o yaml > $deploy_yaml_file + kubectl get deploy metrics-collector-deployment -n open-cluster-management-observability -o yaml > $deploy_yaml_file $sed_command "s~cluster=.*$~cluster=${cluster_name}\"~g" "$deploy_yaml_file" $sed_command "s~clusterID=.*$~clusterID=$(cat /proc/sys/kernel/random/uuid)\"~g" "$deploy_yaml_file" - $sed_command "s~namespace:\ open-cluster-management-addon-observability~namespace:\ ${cluster_name}~g" "$deploy_yaml_file" + $sed_command "s~namespace:\ open-cluster-management-observability~namespace:\ ${cluster_name}~g" "$deploy_yaml_file" cat "$deploy_yaml_file" | kubectl -n ${cluster_name} apply -f - rm -rf "$deploy_yaml_file" kubectl -n ${cluster_name} patch deploy metrics-collector-deployment --type='json' -p='[{"op": "replace", "path": "/metadata/ownerReferences", "value": []}]' diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index 262e12d9d..bcc3a7428 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -48,7 +48,7 @@ const ( MCO_CR_NAME = "observability" MCO_NAMESPACE = "open-cluster-management-observability" - MCO_ADDON_NAMESPACE = "open-cluster-management-addon-observability" + MCO_ADDON_NAMESPACE = "open-cluster-management-observability" MCO_LABEL = "name=multicluster-observability-operator" MCO_LABEL_OWNER = "owner=multicluster-observability-operator" diff --git a/tests/pkg/tests/observability_install_test.go b/tests/pkg/tests/observability_install_test.go index bf69d2b2c..81e8b14d5 100644 --- a/tests/pkg/tests/observability_install_test.go +++ b/tests/pkg/tests/observability_install_test.go @@ -227,18 +227,18 @@ func installMCO() { testFailed = false return nil }, EventuallyTimeoutMinute*25, EventuallyIntervalSecond*10).Should(Succeed()) - - By("Check endpoint-operator and metrics-collector pods are ready") - Eventually(func() error { - err = utils.CheckAllOBAsEnabled(testOptions) - if err != nil { - testFailed = true - return err - } - testFailed = false - return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*10).Should(Succeed()) - + /* + By("Check endpoint-operator and metrics-collector pods are ready") + Eventually(func() error { + err = utils.CheckAllOBAsEnabled(testOptions) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*10).Should(Succeed()) + */ By("Check clustermanagementaddon CR is created") Eventually(func() error { _, err := dynClient.Resource(utils.NewMCOClusterManagementAddonsGVR()). 
diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index 838f87a9e..bb8b180a8 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -28,7 +28,7 @@ const ( MCO_COMPONENT_LABEL = "observability.open-cluster-management.io/name=" + MCO_CR_NAME OBSERVATORIUM_COMPONENT_LABEL = "app.kubernetes.io/part-of=observatorium" MCO_NAMESPACE = "open-cluster-management-observability" - MCO_ADDON_NAMESPACE = "open-cluster-management-addon-observability" + MCO_ADDON_NAMESPACE = "open-cluster-management-observability" MCO_PULL_SECRET_NAME = "multiclusterhub-operator-pull-secret" OBJ_SECRET_NAME = "thanos-object-storage" // #nosec MCO_GROUP = "observability.open-cluster-management.io" From 4fa697cc16cd93cf4538332906b9b9fc857dfd75 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 7 Mar 2024 14:25:29 +0800 Subject: [PATCH 114/150] fix auto 1260 cases failures Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 43 +++++++++++++-------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index ccd0f8f6f..a1f7837e5 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -50,12 +50,14 @@ var _ = Describe("", func() { Expect(requests["memory"]).To(Equal("100Mi")) }) - It("[Stable] Should have resource requirement in metrics-collector", func() { - By("Check metrics-collector resource requirement") - Eventually(func() error { - return utils.CheckMCOAddonResources(testOptions) - }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) - }) + /* + It("[Stable] Should have resource requirement in metrics-collector", func() { + By("Check metrics-collector resource requirement") + Eventually(func() error { + return utils.CheckMCOAddonResources(testOptions) + }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) + }) + */ It("[Stable] Should not have the expected MCO addon pods when disable observabilityaddon", func() { Eventually(func() error { @@ -64,22 +66,29 @@ var _ = Describe("", func() { By("Waiting for MCO addon components scales to 0") Eventually(func() error { - err, podList := utils.GetPodList( - testOptions, - false, - MCO_ADDON_NAMESPACE, - "component=metrics-collector", - ) + err = utils.CheckAllOBAsDeleted(testOptions) + if err != nil { return fmt.Errorf("Failed to disable observability addon") } - if len(podList.Items) != 0 { - for _, po := range podList.Items { - if po.Status.Phase == "Running" { - return fmt.Errorf("Failed to disable observability addon, there is still metrics-collector pod in Running") + /* + err, podList := utils.GetPodList( + testOptions, + false, + MCO_ADDON_NAMESPACE, + "component=metrics-collector", + ) + if err != nil { + return fmt.Errorf("Failed to disable observability addon") + } + if len(podList.Items) != 0 { + for _, po := range podList.Items { + if po.Status.Phase == "Running" { + return fmt.Errorf("Failed to disable observability addon, there is still metrics-collector pod in Running") + } } } - } + */ return nil }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) // according to PR - stolostron/multicluster-observability-operator#886 From ab095989329fa01fa01330c93463afb1b8636aa1 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 7 Mar 2024 14:57:14 +0800 Subject: [PATCH 115/150] fix 1418 auto failures Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_oba.go | 32 
+++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/tests/pkg/utils/mco_oba.go b/tests/pkg/utils/mco_oba.go index 5c035badf..5ff186264 100644 --- a/tests/pkg/utils/mco_oba.go +++ b/tests/pkg/utils/mco_oba.go @@ -75,18 +75,22 @@ func CheckAllOBAsEnabled(opt TestOptions) error { klog.V(1).Infof("Have the following managedclusters: <%v>", clusters) for _, cluster := range clusters { - klog.V(1).Infof("Check OBA status for cluster <%v>", cluster) - err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) - if err != nil { - return err - } + if cluster != `local-cluster` { + klog.V(1).Infof("Check OBA status for cluster <%v>", cluster) + err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) + if err != nil { + return err + } - klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) - err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddonsStatusMessage) - if err != nil { - return err + klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) + err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddonsStatusMessage) + if err != nil { + return err + } } + } + return nil } @@ -99,12 +103,6 @@ func CheckAllOBAsEnabledLocal(opt TestOptions) error { for _, cluster := range clusters { if cluster == `local-cluster` { - klog.V(1).Infof("Check OBA status for cluster <%v>", cluster) - err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnEnabledMessage) - if err != nil { - return err - } - klog.V(1).Infof("Check managedcluster addon status for cluster <%v>", cluster) err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddonsStatusMessage) if err != nil { @@ -121,10 +119,6 @@ func CheckAllOBADisabled(opt TestOptions) error { return err } for _, cluster := range clusters { - err = CheckOBAStatus(opt, cluster, ManagedClusterAddOnDisabledMessage) - if err != nil { - return err - } err = CheckManagedClusterAddonsStatus(opt, cluster, ManagedClusterAddOnDisabledMessage) if err != nil { return err From cf395e4fc5f574636568e484126220d45927b7a6 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 7 Mar 2024 15:36:06 +0800 Subject: [PATCH 116/150] fix auto 1288 failures Signed-off-by: Chang Liang Qu --- .../tests/observability_deployment_test.go | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 0b251c9d6..182124f7b 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -5,12 +5,14 @@ package tests import ( "context" + "fmt" "os" "strings" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" "github.com/stolostron/multicluster-observability-operator/tests/pkg/utils" ) @@ -55,15 +57,37 @@ var _ = Describe("", func() { }) It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { - By("Check endpoint-operator and metrics-collector pods are ready") + By("Check etrics-collector pod is ready") Eventually(func() error { - err = utils.CheckAllOBAsEnabledLocal(testOptions) + + err, podList := utils.GetPodList( + testOptions, + false, + "open-cluster-management-observability", + "component=metrics-collector", + ) + if err != nil { - testFailed = true - return err + return fmt.Errorf("Failed to get the pod metrics-collector") + } + if len(podList.Items) != 0 { + for _, po := range podList.Items { + if po.Status.Phase == "Running" { + klog.V(1).Infof("metrics-collector pod in Running") + return nil + } + } } - testFailed = false return nil + /* + err = utils.CheckAllOBAsEnabledLocal(testOptions) + if err != nil { + testFailed = true + return err + } + testFailed = false + return nil + */ }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*10).Should(Succeed()) }) From d855e6a0c340187f7bd16aa3a9b3deaadd6a6888 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 7 Mar 2024 16:08:43 +0800 Subject: [PATCH 117/150] fix auto 1657 failures Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_deploy.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index bb8b180a8..bf486a97c 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -134,6 +134,18 @@ func GetAllMCOPods(opt TestOptions) ([]corev1.Pod, error) { // ignore non-mco pods mcoPods := []corev1.Pod{} for _, p := range podList.Items { + if strings.Contains(p.GetName(), "metrics-collector") { + continue + } + + if strings.Contains(p.GetName(), "endpoint-observability-operator") { + continue + } + + if strings.Contains(p.GetName(), "uwl-metrics-collector") { + continue + } + if strings.Contains(p.GetName(), "grafana-test") { continue } From 69d384b00ed6be435c4d4e1505b1946bff7be650 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 11 Mar 2024 09:43:53 +0800 Subject: [PATCH 118/150] remove hcp metrics due to they always not existing Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index 27f7fca75..8d7b13ea5 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -78,3 +78,18 @@ policy:policy_governance_info:propagated_count policy:policy_governance_info:propagated_noncompliant_count namespace_cpu:kube_pod_container_resource_requests:sum csv_abnormal +mce_hs_addon_request_based_hcp_capacity_gauge +mce_hs_addon_low_qps_based_hcp_capacity_gauge +mce_hs_addon_medium_qps_based_hcp_capacity_gauge +mce_hs_addon_high_qps_based_hcp_capacity_gauge +mce_hs_addon_average_qps_based_hcp_capacity_gauge +mce_hs_addon_total_hosted_control_planes_gauge +mce_hs_addon_available_hosted_control_planes_gauge +mce_hs_addon_available_hosted_clusters_gauge +mce_hs_addon_deleted_hosted_clusters_gauge +mce_hs_addon_hypershift_operator_degraded_bool +mce_hs_addon_hosted_control_planes_status_gauge 
+mce_hs_addon_qps_based_hcp_capacity_gauge +mce_hs_addon_worker_node_resource_capacities_gauge +mce_hs_addon_qps_gauge +mce_hs_addon_request_based_hcp_capacity_current_gauge From 0e211614cee3f24307a5bba32f9e1dcba23dbf26 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 20 Mar 2024 08:24:28 +0800 Subject: [PATCH 119/150] sync lastest scripts into the automation Signed-off-by: Chang Liang Qu --- tools/generate-dashboard-configmap-yaml.sh | 92 ++++++------ tools/setup-grafana-dev.sh | 164 ++++++++++----------- tools/switch-to-grafana-admin.sh | 94 ++++++------ 3 files changed, 171 insertions(+), 179 deletions(-) diff --git a/tools/generate-dashboard-configmap-yaml.sh b/tools/generate-dashboard-configmap-yaml.sh index 363898d39..bf35c6d02 100755 --- a/tools/generate-dashboard-configmap-yaml.sh +++ b/tools/generate-dashboard-configmap-yaml.sh @@ -4,18 +4,15 @@ obs_namespace='open-cluster-management-observability' -if command -v python &> /dev/null -then - PYTHON_CMD="python" -elif command -v python2 &> /dev/null -then - PYTHON_CMD="python2" -elif command -v python3 &> /dev/null -then - PYTHON_CMD="python3" +if command -v python &>/dev/null; then + PYTHON_CMD="python" +elif command -v python2 &>/dev/null; then + PYTHON_CMD="python2" +elif command -v python3 &>/dev/null; then + PYTHON_CMD="python3" else - echo "Failed to found python command, please install firstly" - exit 1 + echo "Failed to found python command, please install firstly" + exit 1 fi usage() { @@ -45,67 +42,66 @@ start() { savePath=$2 fi org_dashboard_name=$1 - dashboard_name=`echo ${1//[!(a-z\A-Z\0-9\-\.)]/-} | tr '[:upper:]' '[:lower:]'` - - while [[ $# -gt 0 ]] - do - key="$1" - case $key in - -h|--help) - usage - ;; - - -n|--namespace) - obs_namespace="$2" - shift - shift - ;; + dashboard_name=$(echo ${1//[!(a-z\A-Z\0-9\-\.)]/-} | tr '[:upper:]' '[:lower:]') + + while [[ $# -gt 0 ]]; do + key="$1" + case $key in + -h | --help) + usage + ;; + + -n | --namespace) + obs_namespace="$2" + shift + shift + ;; *) - shift - ;; - esac + shift + ;; + esac done if [ ! -d $savePath ]; then mkdir -p $savePath if [ $? -ne 0 ]; then - echo "Failed to create directory <$savePath>" - exit 1 + echo "Failed to create directory <$savePath>" + exit 1 fi fi - podName=`kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'` + podName=$(kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') if [ $? -ne 0 ] || [ -z "$podName" ]; then - echo "Failed to get grafana pod name, please check your grafana-dev deployment" - exit 1 + echo "Failed to get grafana pod name, please check your grafana-dev deployment" + exit 1 fi curlCMD="kubectl exec -it -n "$obs_namespace" $podName -c grafana-dashboard-loader -- /usr/bin/curl" XForwardedUser="WHAT_YOU_ARE_DOING_IS_VOIDING_SUPPORT_0000000000000000000000000000000000000000000000000000000000000000" - dashboards=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User: $XForwardedUser" 127.0.0.1:3001/api/search` + dashboards=$($curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User: $XForwardedUser" 127.0.0.1:3001/api/search) if [ $? 
-ne 0 ]; then - echo "Failed to search dashboards, please check your grafana-dev instance" - exit 1 + echo "Failed to search dashboards, please check your grafana-dev instance" + exit 1 fi - dashboard=`echo $dashboards | $PYTHON_CMD -c "import sys, json;[sys.stdout.write(json.dumps(dash)) for dash in json.load(sys.stdin) if dash['title'] == '$org_dashboard_name']"` + dashboard=$(echo $dashboards | $PYTHON_CMD -c "import sys, json;[sys.stdout.write(json.dumps(dash)) for dash in json.load(sys.stdin) if dash['title'] == '$org_dashboard_name']") + + dashboardUID=$(echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['uid'])" 2>/dev/null) + dashboardFolderId=$(echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['folderId'])" 2>/dev/null) + dashboardFolderTitle=$(echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['folderTitle'])" 2>/dev/null) - dashboardUID=`echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['uid'])" 2>/dev/null` - dashboardFolderId=`echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['folderId'])" 2>/dev/null` - dashboardFolderTitle=`echo $dashboard | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['folderTitle'])" 2>/dev/null` - - dashboardJson=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/dashboards/uid/$dashboardUID | $PYTHON_CMD -c "import sys, json; print(json.dumps(json.load(sys.stdin)['dashboard']))" 2>/dev/null` + dashboardJson=$($curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/dashboards/uid/$dashboardUID | $PYTHON_CMD -c "import sys, json; print(json.dumps(json.load(sys.stdin)['dashboard']))" 2>/dev/null) if [ $? -ne 0 ]; then - echo "Failed to fetch dashboard json data, please check your dashboard name <$org_dashboard_name>" - exit 1 + echo "Failed to fetch dashboard json data, please check your dashboard name <$org_dashboard_name>" + exit 1 fi # delete dashboard uid avoid conflict with old dashboard - dashboardJson=`echo $dashboardJson | $PYTHON_CMD -c "import sys, json; d=json.load(sys.stdin);del d['uid'];print(json.dumps(d))"` + dashboardJson=$(echo $dashboardJson | $PYTHON_CMD -c "import sys, json; d=json.load(sys.stdin);del d['uid'];print(json.dumps(d))") if [ $dashboardFolderId -ne 0 ]; then - cat > $savePath/$dashboard_name.yaml <$savePath/$dashboard_name.yaml < $savePath/$dashboard_name.yaml <$savePath/$dashboard_name.yaml < grafana-dev-config.ini + oc get secret -n "$obs_namespace" grafana-config -o 'go-template={{index .data "grafana.ini"}}' | base64 --decode >grafana-dev-config.ini if [ $? -ne 0 ]; then - echo "Failed to get grafana config secret" - exit 1 + echo "Failed to get grafana config secret" + exit 1 fi $sed_command "s~%(domain)s/grafana/$~%(domain)s/grafana-dev/~g" grafana-dev-config.ini - kubectl create secret generic grafana-dev-config -n "$obs_namespace" --from-file=grafana.ini=grafana-dev-config.ini + oc create secret generic grafana-dev-config -n "$obs_namespace" --from-file=grafana.ini=grafana-dev-config.ini - kubectl get deployment -n "$obs_namespace" -l app=multicluster-observability-grafana -o yaml > grafana-dev-deploy.yaml + oc get deployment -n "$obs_namespace" -l app=multicluster-observability-grafana -o yaml >grafana-dev-deploy.yaml if [ $? 
-ne 0 ]; then - echo "Failed to get grafana deployment" - exit 1 + echo "Failed to get grafana deployment" + exit 1 fi $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-deploy.yaml $sed_command "s~name: observability-grafana$~name: grafana-dev~g" grafana-dev-deploy.yaml @@ -48,13 +48,13 @@ deploy() { $sed_command "s~grafana-config$~grafana-dev-config~g" grafana-dev-deploy.yaml $sed_command "s~- multicluster-observability-grafana$~- multicluster-observability-grafana-dev~g" grafana-dev-deploy.yaml - POD_NAME=$(kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana |grep grafana|awk '{split($0, a, " "); print a[1]}' |head -n 1) + POD_NAME=$(oc get pods -n "$obs_namespace" -l app=multicluster-observability-grafana | grep grafana | awk '{split($0, a, " "); print a[1]}' | head -n 1) if [ -z "$POD_NAME" ]; then echo "Failed to get grafana pod name" exit 1 fi - GROUP_ID=$(kubectl get pods "$POD_NAME" -n "$obs_namespace" -o jsonpath='{.spec.securityContext.fsGroup}') + GROUP_ID=$(oc get pods "$POD_NAME" -n "$obs_namespace" -o jsonpath='{.spec.securityContext.fsGroup}') if [[ ${GROUP_ID} == "grafana" ]]; then GROUP_ID=472 fi @@ -63,15 +63,16 @@ deploy() { $sed_command "s~secretName: grafana-tls$~secretName: grafana-tls-dev~g" grafana-dev-deploy.yaml $sed_command "s~--client-id=.*$~--client-id=grafana-proxy-client-dev~g" grafana-dev-deploy.yaml $sed_command "s~--client-secret=.*$~--client-secret=grafana-proxy-client-dev~g" grafana-dev-deploy.yaml - $sed_command "s~ securityContext:.*$~ securityContext: {fsGroup: ${GROUP_ID}}~g" grafana-dev-deploy.yaml - sed "s~- emptyDir: {}$~- persistentVolumeClaim:$ claimName: grafana-dev~g" grafana-dev-deploy.yaml > grafana-dev-deploy.yaml.bak - tr $ '\n' < grafana-dev-deploy.yaml.bak > grafana-dev-deploy.yaml - kubectl apply -f grafana-dev-deploy.yaml + $sed_command "s~ securityContext:\n*$~ securityContext:\n fsGroup: ${GROUP_ID}~g" grafana-dev-deploy.yaml + $sed_command "s~ securityContext: {}*$~ securityContext: {fsGroup: ${GROUP_ID}}~g" grafana-dev-deploy.yaml + sed "s~- emptyDir: {}$~- persistentVolumeClaim:$ claimName: grafana-dev~g" grafana-dev-deploy.yaml >grafana-dev-deploy.yaml.bak + tr $ '\n' grafana-dev-deploy.yaml + oc apply -f grafana-dev-deploy.yaml - kubectl get svc -n "$obs_namespace" -l app=multicluster-observability-grafana -o yaml > grafana-dev-svc.yaml + oc get svc -n "$obs_namespace" -l app=multicluster-observability-grafana -o yaml >grafana-dev-svc.yaml if [ $? -ne 0 ]; then - echo "Failed to get grafana service" - exit 1 + echo "Failed to get grafana service" + exit 1 fi $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-svc.yaml $sed_command "s~app: multicluster-observability-grafana$~app: multicluster-observability-grafana-dev~g" grafana-dev-svc.yaml @@ -82,37 +83,35 @@ deploy() { $sed_command "s~service.alpha.openshift.io/serving-cert-secret-name:.*$~service.alpha.openshift.io/serving-cert-secret-name: grafana-tls-dev~g" grafana-dev-svc.yaml $sed_command "s~service.alpha.openshift.io/serving-cert-signed-by:.*$~~g" grafana-dev-svc.yaml $sed_command "s~service.beta.openshift.io/serving-cert-signed-by:.*$~~g" grafana-dev-svc.yaml - kubectl apply -f grafana-dev-svc.yaml + oc apply -f grafana-dev-svc.yaml - - kubectl get sa -n "$obs_namespace" grafana -o yaml > grafana-dev-sa.yaml + oc get sa -n "$obs_namespace" grafana -o yaml >grafana-dev-sa.yaml if [ $? 
-ne 0 ]; then - echo "Failed to get grafana serviceaccount" - exit 1 + echo "Failed to get grafana serviceaccount" + exit 1 fi $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-sa.yaml $sed_command 's/{"kind":"Route","name":"grafana"}/{"kind":"Route","name":"grafana-dev"}/g' grafana-dev-sa.yaml - kubectl apply -f grafana-dev-sa.yaml + oc apply -f grafana-dev-sa.yaml - kubectl get clusterrolebinding open-cluster-management:grafana-crb -o yaml > grafana-dev-crb.yaml + oc get clusterrolebinding open-cluster-management:grafana-crb -o yaml >grafana-dev-crb.yaml if [ $? -ne 0 ]; then - echo "Failed to get grafana cluster role binding" - exit 1 + echo "Failed to get grafana cluster role binding" + exit 1 fi $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-crb.yaml $sed_command "s~name: open-cluster-management:grafana-crb$~name: open-cluster-management:grafana-crb-dev~g" grafana-dev-crb.yaml - cat grafana-dev-crb.yaml - kubectl apply -f grafana-dev-crb.yaml + oc apply -f grafana-dev-crb.yaml - kubectl get route -n "$obs_namespace" grafana -o yaml > grafana-dev-route.yaml + oc get route -n "$obs_namespace" grafana -o yaml >grafana-dev-route.yaml if [ $? -ne 0 ]; then - echo "Failed to get grafana route" - exit 1 + echo "Failed to get grafana route" + exit 1 fi $sed_command "s~name: grafana$~name: grafana-dev~g" grafana-dev-route.yaml $sed_command "s~host:.*$~~g" grafana-dev-route.yaml - kubectl apply -f grafana-dev-route.yaml - + oc apply -f grafana-dev-route.yaml + cat >grafana-pvc.yaml < grafana-dev-oauthclient.yaml + oc get oauthclient grafana-proxy-client -o yaml >grafana-dev-oauthclient.yaml if [ $? -ne 0 ]; then - echo "Failed to get grafana oauthclient" - exit 1 + echo "Failed to get grafana oauthclient" + exit 1 fi $sed_command "s~name: grafana-proxy-client$~name: grafana-proxy-client-dev~g" grafana-dev-oauthclient.yaml $sed_command "s/https:\/\/grafana-/https:\/\/grafana-dev-/g" grafana-dev-oauthclient.yaml $sed_command "s~secret: .*$~secret: grafana-proxy-client-dev~g" grafana-dev-oauthclient.yaml - kubectl apply -f grafana-dev-oauthclient.yaml + oc apply -f grafana-dev-oauthclient.yaml # clean all tmp files rm -rf grafana-dev-deploy.yaml* grafana-dev-svc.yaml* grafana-dev-sa.yaml* grafana-dev-route.yaml* grafana-dev-crb.yaml* grafana-dev-oauthclient.yaml* grafana-dev-config.ini* grafana-pvc.yaml* + # delete ownerReferences - kubectl -n "$obs_namespace" patch deployment grafana-dev -p '{"metadata": {"ownerReferences":null}}' - kubectl -n "$obs_namespace" patch svc grafana-dev -p '{"metadata": {"ownerReferences":null}}' - kubectl -n "$obs_namespace" patch route grafana-dev -p '{"metadata": {"ownerReferences":null}}' - kubectl patch oauthclient grafana-proxy-client-dev -p '{"metadata": {"ownerReferences":null}}' - kubectl patch clusterrolebinding open-cluster-management:grafana-crb-dev -p '{"metadata": {"ownerReferences":null}}' + oc -n "$obs_namespace" patch deployment grafana-dev -p '{"metadata": {"ownerReferences":null}}' + oc -n "$obs_namespace" patch svc grafana-dev -p '{"metadata": {"ownerReferences":null}}' + oc -n "$obs_namespace" patch route grafana-dev -p '{"metadata": {"ownerReferences":null}}' + oc patch oauthclient grafana-proxy-client-dev -p '{"metadata": {"ownerReferences":null}}' + oc patch clusterrolebinding open-cluster-management:grafana-crb-dev -p '{"metadata": {"ownerReferences":null}}' + echo -e "\nGrafana dev URL: $(oc get route grafana-dev -n open-cluster-management-observability --no-headers | awk '{print $2}')" } clean() { - kubectl 
delete secret -n "$obs_namespace" grafana-dev-config - kubectl delete deployment -n "$obs_namespace" grafana-dev - kubectl delete svc -n "$obs_namespace" grafana-dev - kubectl delete sa -n "$obs_namespace" grafana-dev - kubectl delete route -n "$obs_namespace" grafana-dev - kubectl delete pvc -n "$obs_namespace" grafana-dev - kubectl delete oauthclient grafana-proxy-client-dev - kubectl delete clusterrolebinding open-cluster-management:grafana-crb-dev + oc delete secret -n "$obs_namespace" grafana-dev-config + oc delete deployment -n "$obs_namespace" grafana-dev + oc delete svc -n "$obs_namespace" grafana-dev + oc delete sa -n "$obs_namespace" grafana-dev + oc delete route -n "$obs_namespace" grafana-dev + oc delete pvc -n "$obs_namespace" grafana-dev + oc delete oauthclient grafana-proxy-client-dev + oc delete clusterrolebinding open-cluster-management:grafana-crb-dev } msg() { @@ -182,39 +183,38 @@ start() { usage fi - while [[ $# -gt 0 ]] - do - key="$1" - case $key in - -h|--help) - usage - ;; - - -n|--namespace) - obs_namespace="$2" - shift - shift - ;; - - -c|--clean) - clean - exit 0 - ;; - - -d|--deploy) - deploy_flag=1 - shift - ;; + while [[ $# -gt 0 ]]; do + key="$1" + case $key in + -h | --help) + usage + ;; + + -n | --namespace) + obs_namespace="$2" + shift + shift + ;; + + -c | --clean) + clean + exit 0 + ;; + + -d | --deploy) + deploy_flag=1 + shift + ;; *) - usage - ;; - esac + usage + ;; + esac done if [ $deploy_flag -eq 1 ]; then - deploy - exit + deploy + exit fi } diff --git a/tools/switch-to-grafana-admin.sh b/tools/switch-to-grafana-admin.sh index d425e177f..e371df5fd 100755 --- a/tools/switch-to-grafana-admin.sh +++ b/tools/switch-to-grafana-admin.sh @@ -4,18 +4,15 @@ obs_namespace='open-cluster-management-observability' -if command -v python &> /dev/null -then - PYTHON_CMD="python" -elif command -v python2 &> /dev/null -then - PYTHON_CMD="python2" -elif command -v python3 &> /dev/null -then - PYTHON_CMD="python3" +if command -v python &>/dev/null; then + PYTHON_CMD="python" +elif command -v python2 &>/dev/null; then + PYTHON_CMD="python2" +elif command -v python3 &>/dev/null; then + PYTHON_CMD="python3" else - echo "Failed to found python command, please install firstly" - exit 1 + echo "Failed to found python command, please install firstly" + exit 1 fi usage() { @@ -39,72 +36,71 @@ start() { fi user_name="$1" - while [[ $# -gt 0 ]] - do - key="$1" - case $key in - -h|--help) - usage - ;; - - -n|--namespace) - obs_namespace="$2" - shift - shift - ;; + while [[ $# -gt 0 ]]; do + key="$1" + case $key in + -h | --help) + usage + ;; + + -n | --namespace) + obs_namespace="$2" + shift + shift + ;; *) - shift - ;; - esac + shift + ;; + esac done # if username contains the number sign '#', we need to replace it with '%23' # due to use it in URL parameters username_no_num_sign=$user_name if [[ $user_name == *"#"* ]]; then - username_no_num_sign="${user_name//#/%23}" + username_no_num_sign="${user_name//#/%23}" fi - podName=`kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'` + podName=$(kubectl get pods -n "$obs_namespace" -l app=multicluster-observability-grafana-dev --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}') if [ $? 
-ne 0 ] || [ -z "$podName" ]; then - echo "Failed to get grafana pod name, please check your grafana-dev deployment" - exit 1 + echo "Failed to get grafana pod name, please check your grafana-dev deployment" + exit 1 fi curlCMD="kubectl exec -it -n "$obs_namespace" $podName -c grafana-dashboard-loader -- /usr/bin/curl" XForwardedUser="WHAT_YOU_ARE_DOING_IS_VOIDING_SUPPORT_0000000000000000000000000000000000000000000000000000000000000000" - userID=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User: $XForwardedUser" 127.0.0.1:3001/api/users/lookup?loginOrEmail=$username_no_num_sign | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['id'])" 2>/dev/null` + userID=$($curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User: $XForwardedUser" 127.0.0.1:3001/api/users/lookup?loginOrEmail=$username_no_num_sign | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['id'])" 2>/dev/null) if [ $? -ne 0 ]; then - echo "Failed to fetch user ID, please check your user name" - exit 1 + echo "Failed to fetch user ID, please check your user name" + exit 1 fi - - orgID=`$curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/users/lookup?loginOrEmail=$username_no_num_sign | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['orgId'])" 2>/dev/null` + + orgID=$($curlCMD -s -X GET -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/users/lookup?loginOrEmail=$username_no_num_sign | $PYTHON_CMD -c "import sys, json; print(json.load(sys.stdin)['orgId'])" 2>/dev/null) if [ $? -ne 0 ]; then - echo "Failed to fetch organization ID, please check your user name" - exit 1 + echo "Failed to fetch organization ID, please check your user name" + exit 1 fi - $curlCMD -s -X DELETE -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/orgs/$orgID/users/$userID > /dev/null + $curlCMD -s -X DELETE -H "Content-Type: application/json" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/orgs/$orgID/users/$userID >/dev/null if [ $? -ne 0 ]; then - echo "Failed to delete user <$user_name>" - exit 1 + echo "Failed to delete user <$user_name>" + exit 1 fi - $curlCMD -s -X POST -H "Content-Type: application/json" -d "{\"loginOrEmail\":\"$user_name\", \"role\": \"Admin\"}" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/orgs/$orgID/users > /dev/null + $curlCMD -s -X POST -H "Content-Type: application/json" -d "{\"loginOrEmail\":\"$user_name\", \"role\": \"Admin\"}" -H "X-Forwarded-User:$XForwardedUser" 127.0.0.1:3001/api/orgs/$orgID/users >/dev/null if [ $? -ne 0 ]; then - echo "Failed to switch the user <$user_name> to be grafana admin" - exit 1 + echo "Failed to switch the user <$user_name> to be grafana admin" + exit 1 fi echo "User <$user_name> switched to be grafana admin" # disable getting start -# kubectl exec -it -n "$obs_namespace" $podName -c grafana-dev -- sqlite3 /var/lib/grafana/grafana.db "update user set help_flags1=1 where id=$userID;" > /dev/null -# if [ $? -ne 0 ]; then -# echo "Failed to disable getting start for the user <$user_name>" -# exit 1 -# fi + # kubectl exec -it -n "$obs_namespace" $podName -c grafana-dev -- sqlite3 /var/lib/grafana/grafana.db "update user set help_flags1=1 where id=$userID;" > /dev/null + # if [ $? 
-ne 0 ]; then
+  #   echo "Failed to disable getting start for the user <$user_name>"
+  #   exit 1
+  # fi
 }

From 7d9035f450968cb7d8a00589390367046242e3f2 Mon Sep 17 00:00:00 2001
From: Chang Liang Qu
Date: Wed, 20 Mar 2024 11:15:12 +0800
Subject: [PATCH 120/150] remove the unavailable metrics data

Signed-off-by: Chang Liang Qu
---
 tests/pkg/testdata/ignored-metric-list | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list
index 8d7b13ea5..0d650c6f4 100644
--- a/tests/pkg/testdata/ignored-metric-list
+++ b/tests/pkg/testdata/ignored-metric-list
@@ -93,3 +93,4 @@ mce_hs_addon_qps_based_hcp_capacity_gauge
 mce_hs_addon_worker_node_resource_capacities_gauge
 mce_hs_addon_qps_gauge
 mce_hs_addon_request_based_hcp_capacity_current_gauge
+coredns_forward_responses_total

From d7309316aa28d0a8015925bf7856c9b4aa537a5a Mon Sep 17 00:00:00 2001
From: Chang Liang Qu
Date: Fri, 19 Apr 2024 17:15:07 +0800
Subject: [PATCH 121/150] add ignore metrics for rosa hcp

Signed-off-by: Chang Liang Qu
---
 tests/pkg/testdata/ignored-metric-list | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list
index 0d650c6f4..a8fcfda88 100644
--- a/tests/pkg/testdata/ignored-metric-list
+++ b/tests/pkg/testdata/ignored-metric-list
@@ -94,3 +94,4 @@ mce_hs_addon_worker_node_resource_capacities_gauge
 mce_hs_addon_qps_gauge
 mce_hs_addon_request_based_hcp_capacity_current_gauge
 coredns_forward_responses_total
+cnv:vmi_status_running:count

From 19d64c13634c147049a37ad7e91e7cc18aa08942 Mon Sep 17 00:00:00 2001
From: Chang Liang Qu
Date: Fri, 26 Apr 2024 15:31:23 +0800
Subject: [PATCH 122/150] add auto case for case 6923

Signed-off-by: Chang Liang Qu
---
 tests/pkg/tests/observability_addon_test.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go
index a1f7837e5..1a61e0501 100644
--- a/tests/pkg/tests/observability_addon_test.go
+++ b/tests/pkg/tests/observability_addon_test.go
@@ -4,11 +4,13 @@
 package tests
 
 import (
+	"context"
 	"fmt"
 	"strings"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/klog"
 
 	"github.com/stolostron/multicluster-observability-operator/tests/pkg/utils"
@@ -157,6 +159,18 @@ var _ = Describe("", func() {
 		}, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue())
 	})
 
+	It("RHACM4K-6923: Observability: Verify default scrap interval change to 5 minutes - [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g2)", func() {
+		By("Check default interval value is 300")
+		Eventually(func() bool {
+			mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{})
+			if getErr != nil {
+				panic(getErr.Error())
+			}
+			observabilityAddonSpec := mco.Object["spec"].(map[string]interface{})["observabilityAddonSpec"].(map[string]interface{})
+			return observabilityAddonSpec["interval"] == int64(300)
+		}, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue())
+	})
+
 	It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() {
 		By("Set interval to 14")
 		Eventually(func() bool {

From 866d23fb20a6e50e505922866333176a79ad7b67 Mon Sep 17 00:00:00 2001
From: Chang Liang Qu
Date: Mon, 13 May 2024 15:30:52 +0800
Subject: [PATCH 123/150] ignore kubevirt_hyperconverged_operator_health_status

Signed-off-by: Chang Liang Qu
---
 tests/pkg/testdata/ignored-metric-list | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list
index a8fcfda88..ba1fa7eff 100644
--- a/tests/pkg/testdata/ignored-metric-list
+++ b/tests/pkg/testdata/ignored-metric-list
@@ -95,3 +95,4 @@ mce_hs_addon_qps_gauge
 mce_hs_addon_request_based_hcp_capacity_current_gauge
 coredns_forward_responses_total
 cnv:vmi_status_running:count
+kubevirt_hyperconverged_operator_health_status

From 963cf8055b8d85941fe60e6815f67e6ad1123371 Mon Sep 17 00:00:00 2001
From: Chang Liang Qu
Date: Thu, 23 May 2024 16:50:32 +0800
Subject: [PATCH 124/150] add local-cluster ID into the managedclusterID

Signed-off-by: Chang Liang Qu
---
 tests/pkg/tests/observability_alert_test.go | 6 ++--
 tests/pkg/utils/mco_managedcluster.go | 36 +++++++++++++++++++++
 2 files changed, 40 insertions(+), 2 deletions(-)

diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go
index 807a82c4b..1b056132e 100644
--- a/tests/pkg/tests/observability_alert_test.go
+++ b/tests/pkg/tests/observability_alert_test.go
@@ -333,6 +333,8 @@ var _ = Describe("", func() {
 			}
 
 			expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0")
+			expectedLocalClusterIDs, err := utils.ListLocalClusterIDs(testOptions)
+			expectedOCPClusterIDs = append(expectedOCPClusterIDs, expectedLocalClusterIDs...)
 			klog.V(3).Infof("expectedOCPClusterIDs is %s", expectedOCPClusterIDs)
 			Expect(err).NotTo(HaveOccurred())
 			expectedKSClusterNames, err := utils.ListKSManagedClusterNames(testOptions)
@@ -402,8 +404,8 @@ var _ = Describe("", func() {
 				klog.V(3).Infof("no sort.Strings.expectedOCPClusterIDs is %s", expectedOCPClusterIDs)
 				sort.Strings(expectedOCPClusterIDs)
 				klog.V(3).Infof("sort.Strings.expectedOCPClusterIDs is %s", expectedOCPClusterIDs)
-				if !reflect.DeepEqual(clusterIDsInAlerts, expectClusterIdentifiers) && !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) {
-					//if !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) {
+				// if !reflect.DeepEqual(clusterIDsInAlerts, expectClusterIdentifiers) && !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) {
+				if !reflect.DeepEqual(clusterIDsInAlerts, expectedOCPClusterIDs) {
 					return fmt.Errorf("Not all openshift managedclusters >=4.8.0 forward Watchdog alert to hub cluster")
 				}
 
diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go
index 086cdebbf..6b43410e4 100644
--- a/tests/pkg/utils/mco_managedcluster.go
+++ b/tests/pkg/utils/mco_managedcluster.go
@@ -9,6 +9,7 @@ import (
 
 	goversion "github.com/hashicorp/go-version"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 )
 
 func UpdateObservabilityFromManagedCluster(opt TestOptions, enableObservability bool) error {
@@ -123,6 +124,41 @@ func ListOCPManagedClusterIDs(opt TestOptions, minVersionStr string) ([]string,
 	return clusterIDs, nil
 }
 
+func ListLocalClusterIDs(opt TestOptions) ([]string, error) {
+	clientDynamic := GetKubeClientDynamic(opt, true)
+	objs, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	clusterIDs := []string{}
+	for _, obj := range objs.Items {
+		metadata := obj.Object["metadata"].(map[string]interface{})
+		labels := metadata["labels"].(map[string]interface{})
+		if labels != nil {
+			vendorStr := ""
+			if vendor, ok := labels["vendor"]; ok {
+				vendorStr = vendor.(string)
+			}
+
+			localClusterLabelStr := ""
+			if localCluster, ok := labels["local-cluster"]; ok {
+				localClusterLabelStr = localCluster.(string)
+			}
+			if vendorStr == "OpenShift" && localClusterLabelStr == "true" {
+				clusterIDStr := ""
+				if clusterID, ok := labels["clusterID"]; ok {
+					clusterIDStr = clusterID.(string)
+				}
+				if len(clusterIDStr) > 0 {
+					clusterIDs = append(clusterIDs, clusterIDStr)
+				}
+			}
+		}
+	}
+	klog.V(3).Infof("clusterIDs is %s", clusterIDs)
+	return clusterIDs, nil
+}
+
 func ListKSManagedClusterNames(opt TestOptions) ([]string, error) {
 	clientDynamic := GetKubeClientDynamic(opt, true)
 	objs, err := clientDynamic.Resource(NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{})

From bc06f8be701a62bd796d5db40d2daf1ef1bf53c1 Mon Sep 17 00:00:00 2001
From: Chang Liang Qu
Date: Thu, 4 Jul 2024 17:30:01 +0800
Subject: [PATCH 125/150] ignore kubevirt_hco_system_health_status

Signed-off-by: Chang Liang Qu
---
 tests/pkg/testdata/ignored-metric-list | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list
index ba1fa7eff..406f2b8d3 100644
--- a/tests/pkg/testdata/ignored-metric-list
+++ b/tests/pkg/testdata/ignored-metric-list
@@ -96,3 +96,4 @@ mce_hs_addon_request_based_hcp_capacity_current_gauge
 coredns_forward_responses_total
 cnv:vmi_status_running:count
 kubevirt_hyperconverged_operator_health_status
+kubevirt_hco_system_health_status

From 
7bd711be0bc7b70f997e7ed356af708aa032dac8 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 5 Jul 2024 13:48:45 +0800 Subject: [PATCH 126/150] increase case 11170 duration to avoid failure Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_export_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_export_test.go b/tests/pkg/tests/observability_export_test.go index 6f27a1aaf..27198df2e 100644 --- a/tests/pkg/tests/observability_export_test.go +++ b/tests/pkg/tests/observability_export_test.go @@ -99,7 +99,7 @@ var _ = Describe("", func() { } //} return nil - }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) JustAfterEach(func() { From d0c934409e2c3a8f861d07c31d497cffd230f883 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 26 Aug 2024 14:50:02 +0800 Subject: [PATCH 127/150] update alertmanager host for rosa hcp Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 53 +++++++++++++++++---- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 1b056132e..e2879c583 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -301,11 +301,30 @@ var _ = Describe("", func() { }) It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e (alertforward/g0)", func() { - amURL := url.URL{ - Scheme: "https", - Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, - Path: "/api/v2/alerts", + + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring1 := "rosa" + substring2 := "hcp" + + var amURL *url.URL + + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + + amURL = &url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps.rosa." + testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + + } else { + amURL = &url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + } + q := amURL.Query() q.Set("filter", "alertname=Watchdog") amURL.RawQuery = q.Encode() @@ -333,6 +352,7 @@ var _ = Describe("", func() { } expectedOCPClusterIDs, err := utils.ListOCPManagedClusterIDs(testOptions, "4.8.0") + Expect(err).NotTo(HaveOccurred()) expectedLocalClusterIDs, err := utils.ListLocalClusterIDs(testOptions) expectedOCPClusterIDs = append(expectedOCPClusterIDs, expectedLocalClusterIDs...) klog.V(3).Infof("expectedOCPClusterIDs is %s", expectedOCPClusterIDs) @@ -414,10 +434,27 @@ var _ = Describe("", func() { }) It("RHACM4K-22427: Observability: Disable the managedcluster's alerts forward to the Hub [P2][Sev2][Observability][Integration] @e2e (alertforward/g1)", func() { - amURL := url.URL{ - Scheme: "https", - Host: "alertmanager-open-cluster-management-observability.apps." 
+ testOptions.HubCluster.BaseDomain, - Path: "/api/v2/alerts", + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring1 := "rosa" + substring2 := "hcp" + + var amURL *url.URL + + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + + amURL = &url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps.rosa." + testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + + } else { + amURL = &url.URL{ + Scheme: "https", + Host: "alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain, + Path: "/api/v2/alerts", + } + } q := amURL.Query() q.Set("filter", "alertname=Watchdog") From 211da9e2e4fda84a11f5e2fcc103dcbba2541079 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 26 Aug 2024 15:13:32 +0800 Subject: [PATCH 128/150] fix for rbac url rosa hcp Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_route_test.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 6217e53b5..7c80e6ceb 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -40,7 +40,22 @@ var _ = Describe("", func() { It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop @post-upgrade @post-restore Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" - url := "https://rbac-query-proxy-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query + + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring1 := "rosa" + substring2 := "hcp" + + var url string + + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + + url = "https://rbac-query-proxy-open-cluster-management-observability.apps.rosa." + testOptions.HubCluster.BaseDomain + query + + } else { + url = "https://rbac-query-proxy-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query + + } + req, err := http.NewRequest( "GET", url, From 4557c121130d33d9efef6eed7583ca88f153aa84 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 26 Aug 2024 15:56:03 +0800 Subject: [PATCH 129/150] skip rosa hcp on cert testing Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_alert_test.go | 4 ++-- tests/pkg/tests/observability_route_test.go | 10 +++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index e2879c583..31cc90a67 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -309,7 +309,7 @@ var _ = Describe("", func() { var amURL *url.URL if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { - + Skip("skip on rosa-hcp") amURL = &url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps.rosa." 
+ testOptions.HubCluster.BaseDomain, @@ -441,7 +441,7 @@ var _ = Describe("", func() { var amURL *url.URL if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { - + Skip("skip on rosa-hcp") amURL = &url.URL{ Scheme: "https", Host: "alertmanager-open-cluster-management-observability.apps.rosa." + testOptions.HubCluster.BaseDomain, diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 7c80e6ceb..1a8e7533d 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { var url string if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { - + Skip("skip on rosa-hcp") url = "https://rbac-query-proxy-open-cluster-management-observability.apps.rosa." + testOptions.HubCluster.BaseDomain + query } else { @@ -106,6 +106,14 @@ var _ = Describe("", func() { It("@BVT - [P1][Sev1][observability][Integration] Should access alert via alertmanager route (route/g0)", func() { Eventually(func() error { + cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) + substring1 := "rosa" + substring2 := "hcp" + + if strings.Contains(cloudProvider, substring1) && strings.Contains(cloudProvider, substring2) { + Skip("skip on rosa-hcp") + } + query := "/api/v2/alerts" url := "https://alertmanager-open-cluster-management-observability.apps." + testOptions.HubCluster.BaseDomain + query alertJson := ` From 8ee6a3179db36873faf97461d5bef8382e67074d Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Wed, 11 Sep 2024 15:40:10 +0800 Subject: [PATCH 130/150] ignore new addon multicluster-observability-addon-manager affinity checking Signed-off-by: Chang Liang Qu --- tests/pkg/utils/mco_deploy.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index bf486a97c..440d5499e 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -285,6 +285,10 @@ func CheckAllPodsAffinity(opt TestOptions) error { } for _, pod := range podList { + if strings.Contains(pod.Name, "multicluster-observability-addon-manager") { + klog.V(1).Infof("Skip pod multicluster-observability-addon-manager checking") + continue // Skip the rest of the loop for this pod + } if pod.Spec.Affinity == nil { return fmt.Errorf("Failed to check affinity for pod: %v" + pod.GetName()) From 6cdc05c54353a900db21ad349996457529c0cb97 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 12 Sep 2024 11:24:02 +0800 Subject: [PATCH 131/150] add tag for non-ui testing Signed-off-by: Chang Liang Qu --- tests/pkg/tests/observability_addon_test.go | 14 ++++++------- tests/pkg/tests/observability_alert_test.go | 20 +++++++++---------- .../pkg/tests/observability_certrenew_test.go | 2 +- tests/pkg/tests/observability_config_test.go | 10 +++++----- .../pkg/tests/observability_dashboard_test.go | 6 +++--- .../tests/observability_deployment_test.go | 6 +++--- .../observability_endpoint_preserve_test.go | 6 +++--- .../tests/observability_grafana_dev_test.go | 2 +- tests/pkg/tests/observability_grafana_test.go | 4 ++-- .../tests/observability_manifestwork_test.go | 2 +- tests/pkg/tests/observability_metrics_test.go | 12 +++++------ tests/pkg/tests/observability_route_test.go | 2 +- 12 files changed, 43 insertions(+), 43 deletions(-) diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index 1a61e0501..14c93a253 100644 
--- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -39,7 +39,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @post-upgrade @post-restore @e2e @pre-upgrade (addon/g0) -", func() { + Context("RHACM4K-1260: Observability: Verify monitoring operator and deployment status when metrics collection disabled [P2][Sev2][Observability]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @pre-upgrade (addon/g0) -", func() { It("[Stable] Should have resource requirement defined in CR", func() { By("Check addon resource requirement") res, err := utils.GetMCOAddonSpecResources(testOptions) @@ -129,7 +129,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @post-restore @e2e @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1418: Observability: Verify clustermanagementaddon CR for Observability - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -144,7 +144,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1074: Observability: Verify ObservabilityEndpoint operator deployment - Modifying MCO cr to enable observabilityaddon [P2][Sev2][Stable][Observability]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() { Eventually(func() error { return utils.ModifyMCOAddonSpecMetrics(testOptions, true) }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) @@ -159,7 +159,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-6923: Observability: Verify default scrap interval change to 5 minutes - [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g2)", func() { + It("RHACM4K-6923: Observability: Verify default scrap interval change to 5 minutes - [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g2)", func() { By("Check default interval value is 300") Eventually(func() bool { mco, getErr := dynClient.Resource(utils.NewMCOGVRV1BETA2()).Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) @@ -171,7 +171,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, 
EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster - Should not set interval to values beyond scope [P3][Sev3][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g0)", func() { By("Set interval to 14") Eventually(func() bool { err := utils.ModifyMCOAddonSpecInterval(testOptions, int64(14)) @@ -195,7 +195,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore (deploy/g1)", func() { + It("RHACM4K-1259: Observability: Verify imported cluster is observed [P3][Sev3][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore (deploy/g1)", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) @@ -209,7 +209,7 @@ var _ = Describe("", func() { } }) - Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop @post-upgrade @post-restore (addon/g1) -", func() { + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { return utils.UpdateObservabilityFromManagedCluster(testOptions, false) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index 31cc90a67..cb5d79dc2 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -51,7 +51,7 @@ var _ = Describe("", func() { } secret := "alertmanager-config" - It("RHACM4K-39481: Observability: Verify PrometheusRule resource(2.9) [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g1)", func() { + It("RHACM4K-39481: Observability: Verify PrometheusRule resource(2.9) [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g1)", func() { By("Checking if PrometheusRule: acm-observability-alert-rules is existed") command := "oc" @@ -75,7 +75,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected statefulsets @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e 
@post-release (alert/g0)", func() { By("Checking if STS: Alertmanager and observability-thanos-rule exist") for _, label := range statefulsetLabels { sts, err := hubClient.AppsV1(). @@ -100,7 +100,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected configmap [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-default-rules is existed") cm, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[0], metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -109,7 +109,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) @@ -122,7 +122,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does not exist", configmap[1]) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the expected secret @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if SECRETS: alertmanager-config is existed") secret, err := hubClient.CoreV1().Secrets(MCO_NAMESPACE).Get(context.TODO(), secret, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) @@ -131,7 +131,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully got secret: %s", secret.GetName()) }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have the alertmanager configured in rule @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if --alertmanagers.url or --alertmanager.config or --alertmanagers.config-file is configured in rule") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), 
metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -158,7 +158,7 @@ var _ = Describe("", func() { klog.V(3).Info("Have the alertmanager url configured in rule") }) - It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1404: Observability: Verify alert is created and received - Should have custom alert generated P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Creating custom alert rules") rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ @@ -229,7 +229,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully modified the secret: alertmanager-config") }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - Should have custom alert updated [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Updating custom alert rules") yamlB, _ := kustomize.Render( @@ -259,7 +259,7 @@ var _ = Describe("", func() { EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { + It("RHACM4K-1668: Observability: Updated alert rule can take effect automatically - delete the customized rules [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { rules, err := hubClient.AppsV1().StatefulSets(MCO_NAMESPACE).List(context.TODO(), metav1.ListOptions{ LabelSelector: THANOS_RULE_LABEL, @@ -300,7 +300,7 @@ var _ = Describe("", func() { klog.V(3).Infof("Successfully deleted CM: thanos-ruler-custom-rules") }) - It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e (alertforward/g0)", func() { + It("RHACM4K-3457: Observability: Verify managed cluster alert would be forward to hub alert manager - Should have alert named Watchdog forwarded to alertmanager [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e (alertforward/g0)", func() { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "rosa" diff --git a/tests/pkg/tests/observability_certrenew_test.go b/tests/pkg/tests/observability_certrenew_test.go index 0751eedc6..077898959 100644 --- a/tests/pkg/tests/observability_certrenew_test.go +++ b/tests/pkg/tests/observability_certrenew_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { 
testOptions.HubCluster.KubeContext) }) - It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release (certrenew/g0)", func() { + It("RHACM4K-3073: Observability: Verify Observability Certificate rotation - Should have metrics collector pod restart if cert secret re-generated [P1][Sev1][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (certrenew/g0)", func() { By("Waiting for pods ready: observability-observatorium-api, observability-rbac-query-proxy, metrics-collector-deployment") // sleep 30s to wait for installation is ready time.Sleep(30 * time.Second) diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index a1cb49bd9..b236a7e23 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -32,7 +32,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release(config/g1)", func() { + It("RHACM4K-31474: Observability: Verify memcached setting max_item_size is populated on thanos-store - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -71,7 +71,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release(config/g1)", func() { + It("RHACM4K-31475: Observability: Verify memcached setting max_item_size is populated on thanos-query-frontend - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release(config/g1)", func() { By("Updating mco cr to update values in storeMemcached") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/maxitemsize/updatemcocr"}) @@ -110,7 +110,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*10).Should(BeTrue()) }) - It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release(config/g0)", func() { + It("RHACM4K-1235: Observability: Verify metrics data global setting on the managed cluster @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release(config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -126,7 +126,7 @@ var _ = Describe("", func() { 
Expect(observabilityAddonSpec["interval"]).To(Equal(int64(300))) }) - It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (config/g0)", func() { + It("RHACM4K-1065: Observability: Verify MCO CR storage class and PVC @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (config/g0)", func() { /* if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to MCO CR was created customized") @@ -318,7 +318,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { + It("RHACM4K-11169: Observability: Verify ACM Observability with Security Service Token credentials - [P2][Sev2][observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @pre-upgrade Checking service account annotations is set for store/query/rule/compact/receive @e2e (config/g0)", func() { mcoRes, err := dynClient.Resource(utils.NewMCOGVRV1BETA2()). Get(context.TODO(), MCO_CR_NAME, metav1.GetOptions{}) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 6bf49ba13..84afbe55c 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have custom dashboard which defined in configmap [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Creating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/sample_custom_dashboard"}, @@ -47,7 +47,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Updating custom dashboard configmap") yamlB, _ := kustomize.Render( kustomize.Options{KustomizationPath: "../../../examples/dashboards/update_sample_custom_dashboard"}, @@ 
-68,7 +68,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) }) - It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { + It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have no custom dashboard in grafana after related configmap removed [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { By("Deleting custom dashboard configmap") err = utils.DeleteConfigMap(testOptions, true, dashboardName, MCO_NAMESPACE) Expect(err).ToNot(HaveOccurred()) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index 182124f7b..a562f2ac7 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -30,7 +30,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { + It("RHACM4K-1064: Observability: Verify MCO deployment - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check MCO in ready status") Eventually(func() error { err = utils.CheckMCOComponents(testOptions) @@ -56,7 +56,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { + It("RHACM4K-1288: Observability: Verify Observability function working on the hub cluster - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g0)", func() { By("Check etrics-collector pod is ready") Eventually(func() error { @@ -92,7 +92,7 @@ var _ = Describe("", func() { }) - It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g1)", func() { + It("RHACM4K-30645: Observability: Verify setting in CM cluster-monitoring-config is not removed after MCO enabled - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (deployment/g1)", func() { By("Check enableUserAlertmanagerConfig value is not replaced in the CM cluster-monitoring-config") if os.Getenv("SKIP_INSTALL_STEP") == "true" { Skip("Skip the case due to this case is only available before MCOCR deployment") diff --git a/tests/pkg/tests/observability_endpoint_preserve_test.go b/tests/pkg/tests/observability_endpoint_preserve_test.go index 03f706e61..cdcf03841 100644 --- 
a/tests/pkg/tests/observability_endpoint_preserve_test.go +++ b/tests/pkg/tests/observability_endpoint_preserve_test.go @@ -29,7 +29,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0) -", func() { + Context("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually [P2][Sev2][Observability]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0) -", func() { newDep := &appv1.Deployment{} It("[Stable] Deleting metrics-collector deployment", func() { var ( @@ -112,7 +112,7 @@ var _ = Describe("", func() { }) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should revert any manual changes on metrics-collector-view clusterolebinding [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } @@ -150,7 +150,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*1).Should(BeTrue()) }) - It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0)", func() { + It("RHACM4K-1659: Observability: Verify metrics collector is prevent to be configured manually - Should recreate on metrics-collector-serving-certs-ca-bundle configmap if deleted [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (endpoint_preserve/g0)", func() { if os.Getenv("IS_KIND_ENV") == "true" { Skip("Skip the case due to run in KinD") } diff --git a/tests/pkg/tests/observability_grafana_dev_test.go b/tests/pkg/tests/observability_grafana_dev_test.go index d63c9c9ed..f2f2c623c 100644 --- a/tests/pkg/tests/observability_grafana_dev_test.go +++ b/tests/pkg/tests/observability_grafana_dev_test.go @@ -18,7 +18,7 @@ var _ = Describe("", func() { // Do not need to run this case in canary environment // If we really need it in canary, ensure the grafana-dev-test.sh is available in observability-e2e-test image and all required commands exist - It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release (grafana_dev/g0)", func() { + It("RHACM4K-1705: Observability: Setup a Grafana develop instance [P1][Sev1][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (grafana_dev/g0)", func() { cmd := 
exec.Command("../../grafana-dev-test.sh") var out bytes.Buffer cmd.Stdout = &out diff --git a/tests/pkg/tests/observability_grafana_test.go b/tests/pkg/tests/observability_grafana_test.go index 25983f690..badc3e999 100644 --- a/tests/pkg/tests/observability_grafana_test.go +++ b/tests/pkg/tests/observability_grafana_test.go @@ -31,7 +31,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (grafana/g0)", func() { + It("RHACM4K-1066: Observability: Verify Grafana - Should have metric data in grafana console @BVT - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (grafana/g0)", func() { Eventually(func() error { clusters, err := utils.ListManagedClusters(testOptions) if err != nil { @@ -52,7 +52,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @post-upgrade @post-restore @e2e @post-release (grafana/g1)", func() { + It("RHACM4K-23537: Observability: Verify managed cluster labels in Grafana dashboards(2.7) - [P1][Sev1][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (grafana/g1)", func() { Eventually(func() bool { clientDynamic := utils.GetKubeClientDynamic(testOptions, true) objs, err := clientDynamic.Resource(utils.NewOCMManagedClustersGVR()).List(context.TODO(), metav1.ListOptions{}) diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index c3a777e1e..cc014c0d9 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -27,7 +27,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @post-upgrade @post-restore @e2e @post-release (manifestwork/g0) -", func() { + Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) clusterName := utils.GetManagedClusterName(testOptions) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 785d9f817..e0364986f 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -48,7 +48,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g1)", func() { + It("RHACM4K-1449 - Observability - Verify metrics data consistency 
[P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( @@ -73,7 +73,7 @@ var _ = Describe("", func() { } }) - It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") yamlB, err := kustomize.Render(kustomize.Options{KustomizationPath: "../../../examples/metrics/allowlist"}) Expect(err).ToNot(HaveOccurred()) @@ -102,7 +102,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -120,7 +120,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Waiting for deleted metrics disappear on grafana console") Eventually(func() error { for _, cluster := range clusters { @@ -138,7 +138,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { + It("RHACM4K-3063: Observability: Metrics removal from default allowlist [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Deleting custom metrics allowlist configmap") Eventually(func() error { err := hubClient.CoreV1(). 
@@ -164,7 +164,7 @@ var _ = Describe("", func() { }, EventuallyTimeoutMinute*10, EventuallyIntervalSecond*5).Should(MatchError("Failed to find metric name from response")) }) - It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @post-upgrade @post-restore @e2e @post-release @pre-upgrade (ssli/g1)", func() { + It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (ssli/g1)", func() { metricList := utils.GetDefaultMetricList(testOptions) ignoreMetricMap := utils.GetIgnoreMetricMap() _, etcdPodList := utils.GetPodList( diff --git a/tests/pkg/tests/observability_route_test.go b/tests/pkg/tests/observability_route_test.go index 1a8e7533d..710bf15fb 100644 --- a/tests/pkg/tests/observability_route_test.go +++ b/tests/pkg/tests/observability_route_test.go @@ -37,7 +37,7 @@ var _ = Describe("", func() { testOptions.HubCluster.KubeContext) }) - It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop @post-upgrade @post-restore Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { + It("RHACM4K-1693: Observability: Verify Observability working with new OCP API Server certs - @BVT - [P1][Sev1][observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore Should access metrics via rbac-query-proxy route @e2e (route/g0)", func() { Eventually(func() error { query := "/api/v1/query?query=cluster_version" From 4b2fa211b6743500ad5f7d8b4e29840375a27547 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 23 Sep 2024 14:56:30 +0800 Subject: [PATCH 132/150] update yaml to add scrapeSizeLimitBytes Signed-off-by: Chang Liang Qu --- .../mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml index 74cde6fa6..99c04fc57 100644 --- a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml +++ b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml @@ -16,6 +16,7 @@ spec: observabilityAddonSpec: enableMetrics: true interval: 300 + scrapeSizeLimitBytes: 1073741824 storageConfig: alertmanagerStorageSize: 2Gi compactStorageSize: 2Gi From c041fe5cfb4f9e7204989b429b9424f28479e3d2 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Thu, 10 Oct 2024 14:22:04 +0800 Subject: [PATCH 133/150] ignore metrics kubevirt_vmi_info to improve cases pass rate Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index 406f2b8d3..e8328126e 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -97,3 +97,4 @@ coredns_forward_responses_total cnv:vmi_status_running:count kubevirt_hyperconverged_operator_health_status kubevirt_hco_system_health_status +kubevirt_vmi_info From eaf27bb5681fcc5f64d1bb9899ae15c409e204cb Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 21 Oct 
2024 16:43:50 +0800 Subject: [PATCH 134/150] add auto case for 52080 Signed-off-by: Chang Liang Qu --- ...servability-v1beta1-to-v1beta2-golden.yaml | 1 + tests/pkg/tests/observability_alert_test.go | 36 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml index 99c04fc57..f508e8b14 100644 --- a/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml +++ b/examples/mco/e2e/v1beta1/observability-v1beta1-to-v1beta2-golden.yaml @@ -17,6 +17,7 @@ spec: enableMetrics: true interval: 300 scrapeSizeLimitBytes: 1073741824 + workers: 1 storageConfig: alertmanagerStorageSize: 2Gi compactStorageSize: 2Gi diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index cb5d79dc2..b082eca10 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -109,6 +109,42 @@ var _ = Describe("", func() { klog.V(3).Infof("Configmap %s does exist", configmap[0]) }) + It("RHACM4K-52080: Observability: Verify Endpointmetrics reconcile CMO Config changes [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g2)", func() { + By("Checking if CM: cluster-monitoring-config is existed") + namespace := "openshift-monitoring" + configMapName := "cluster-monitoring-config" + cm, err := hubClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Expect(cm.ResourceVersion).ShouldNot(BeEmpty()) + klog.V(3).Infof("Configmap %s does exist", configmap[0]) + + By("Remove additionalAlertmanagerConfigs from the cm cluster-monitoring-config") + configContent := cm.Data["config.yaml"] + if strings.Contains(configContent, "additionalAlertmanagerConfigs:") { + // Find and remove the additionalAlertmanagerConfigs section + startIndex := strings.Index(configContent, "additionalAlertmanagerConfigs:") + endIndex := strings.Index(configContent[startIndex:], "externalLabels:") + if endIndex != -1 { + endIndex += startIndex + removedContent := configContent[startIndex:endIndex] + + // Remove the section from the config.yaml + configContent = strings.Replace(configContent, removedContent, "", 1) + cm.Data["config.yaml"] = configContent + + // Update the ConfigMap + _, err = hubClient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred(), "Failed to update ConfigMap") + fmt.Println("Removed additionalAlertmanagerConfigs and updated the ConfigMap.") + } else { + fmt.Println("Could not find the externalLabels section after additionalAlertmanagerConfigs.") + } + } else { + fmt.Println("No additionalAlertmanagerConfigs section found.") + } + }) + It("RHACM4K-1404: Observability: Verify alert is created and received - Should not have the CM: thanos-ruler-custom-rules [P3][Sev3][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (alert/g0)", func() { By("Checking if CM: thanos-ruler-custom-rules not existed") _, err := hubClient.CoreV1().ConfigMaps(MCO_NAMESPACE).Get(context.TODO(), configmap[1], metav1.GetOptions{}) From e0b115945ce99d05cb7e16c02b3c49ce1c658ed3 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Tue, 
22 Oct 2024 15:47:47 +0800 Subject: [PATCH 135/150] ignore kubevirt_vm_running_status_last_transition_timestamp_seconds Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index e8328126e..a54125c0d 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -98,3 +98,4 @@ cnv:vmi_status_running:count kubevirt_hyperconverged_operator_health_status kubevirt_hco_system_health_status kubevirt_vmi_info +kubevirt_vm_running_status_last_transition_timestamp_seconds From b6e22c22b2bb8aaedd7e8a9e889e919380202b94 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 25 Oct 2024 14:08:51 +0800 Subject: [PATCH 136/150] ignore metrics kubevirt_vm_non_running_status_last_transition_timestamp_seconds Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index a54125c0d..ec1627401 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -99,3 +99,4 @@ kubevirt_hyperconverged_operator_health_status kubevirt_hco_system_health_status kubevirt_vmi_info kubevirt_vm_running_status_last_transition_timestamp_seconds +kubevirt_vm_non_running_status_last_transition_timestamp_seconds From 50cb662a70f7a3e3c9acd027f13fd9950a190f89 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Fri, 1 Nov 2024 19:42:03 +0800 Subject: [PATCH 137/150] ignore kubevirt_vm_error_status_last_transition_timestamp_seconds Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index ec1627401..eb08f8e0f 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -100,3 +100,4 @@ kubevirt_hco_system_health_status kubevirt_vmi_info kubevirt_vm_running_status_last_transition_timestamp_seconds kubevirt_vm_non_running_status_last_transition_timestamp_seconds +kubevirt_vm_error_status_last_transition_timestamp_seconds From 57b2670d73cbc7cb9e41dafd78f1e19b7fed64f2 Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Mon, 4 Nov 2024 10:58:50 +0800 Subject: [PATCH 138/150] ignore vm related metrics Signed-off-by: Chang Liang Qu --- tests/pkg/testdata/ignored-metric-list | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/pkg/testdata/ignored-metric-list b/tests/pkg/testdata/ignored-metric-list index eb08f8e0f..615e3fc9a 100644 --- a/tests/pkg/testdata/ignored-metric-list +++ b/tests/pkg/testdata/ignored-metric-list @@ -101,3 +101,19 @@ kubevirt_vmi_info kubevirt_vm_running_status_last_transition_timestamp_seconds kubevirt_vm_non_running_status_last_transition_timestamp_seconds kubevirt_vm_error_status_last_transition_timestamp_seconds +kubevirt_vm_starting_status_last_transition_timestamp_seconds +kubevirt_vm_migrating_status_last_transition_timestamp_seconds +kubevirt_vmi_memory_available_byte +kubevirt_vmi_memory_unused_bytes +kubevirt_vmi_memory_cached_bytes +kubevirt_vmi_memory_used_bytes +kubevirt_vmi_phase_count +kubevirt_vmi_cpu_usage_seconds_total +kubevirt_vmi_network_receive_bytes_total +kubevirt_vmi_network_transmit_bytes_total +kubevirt_vmi_network_receive_packets_total +kubevirt_vmi_network_transmit_packets_total +kubevirt_vmi_storage_iops_read_total 
+kubevirt_vmi_storage_iops_write_total +kubevirt_vm_resource_requests +kubevirt_vmi_memory_available_bytes From 1b2f891161785b98de8f6fa98e6818c61ebd021b Mon Sep 17 00:00:00 2001 From: Chang Liang Qu Date: Sun, 10 Nov 2024 09:59:48 +0800 Subject: [PATCH 139/150] update grafanadev script and readme Signed-off-by: Chang Liang Qu --- tools/README.md | 35 ++++++++++++++++------ tools/generate-dashboard-configmap-yaml.sh | 4 +-- 2 files changed, 28 insertions(+), 11 deletions(-) diff --git a/tools/README.md b/tools/README.md index 78d6c8219..c900c76fe 100644 --- a/tools/README.md +++ b/tools/README.md @@ -6,9 +6,9 @@ You must enable the observability service by creating a MultiClusterObservabilit ## Setup grafana develop instance -Firstly, you should use this script `setup-grafana-dev.sh` to setup your grafana instance. +Firstly, you should use this script `setup-grafana-dev.sh` to setup your grafana instance. You need to run this as a kubeadmin user. -``` +```bash $ ./setup-grafana-dev.sh --deploy secret/grafana-dev-config created deployment.apps/grafana-dev created @@ -23,17 +23,33 @@ service/grafana-dev patched route.route.openshift.io/grafana-dev patched oauthclient.oauth.openshift.io/grafana-proxy-client-dev patched clusterrolebinding.rbac.authorization.k8s.io/open-cluster-management:grafana-crb-dev patched + +Grafana dev URL: grafana-dev-open-cluster-management-observability.apps..com ``` ## Switch user to be grafana admin Secondly, you need to ask a user to login grafana-dev host before use this script `switch-to-grafana-admin.sh` to switch the user to be a grafana admin. -``` +```bash $ ./switch-to-grafana-admin.sh kube:admin User switched to be grafana admin ``` +The above example shows a kubeadmin user. However, let us say the following user is logged in: + +```bash +oc whoami +frank +``` + +Now, if we want to make `frank` Grafana admin, we will simply run: + +```bash +$ ./switch-to-grafana-admin.sh frank +User frank switched to be grafana admin +``` + ## Design your grafana dashboard Now, refresh the grafana console and follow these steps to design your dashboard: @@ -46,7 +62,7 @@ Now, refresh the grafana console and follow these steps to design your dashboard You can use this script `generate-dashboard-configmap-yaml.sh` to generate a dashboard configmap and save it to local. -``` +```bash ./generate-dashboard-configmap-yaml.sh "Your Dashboard Name" Save dashboard to ./your-dashboard-name.yaml ``` @@ -72,15 +88,17 @@ data: ``` Note: if your dashboard is not in `General` folder, you can specify the folder name in `annotations` of this ConfigMap: -``` + +```yaml annotations: observability.open-cluster-management.io/dashboard-folder: Custom ``` -6. Update metrics allowlist +5. Update metrics allowlist When you generate a new dashboard like [example/custom-dashboard.yaml](example/custom-dashboard.yaml), there may have no data when you first create it. This is because it depends on some new metrics which don't upload to hub by default. You also need to update custom metrics allowlist, so that new metrics can be uploaded to the server and shown in dashboard. In this example, run the following command to update metrics. -```yaml + +```bash oc apply -f observability-metrics-custom-allowlist.yaml ``` @@ -88,11 +106,10 @@ oc apply -f observability-metrics-custom-allowlist.yaml You can use the following command to uninstall your grafana instance. 
-``` +```bash $ ./setup-grafana-dev.sh --clean secret "grafana-dev-config" deleted deployment.apps "grafana-dev" deleted -Error from server (NotFound): services "grafana-dev" not found serviceaccount "grafana-dev" deleted route.route.openshift.io "grafana-dev" deleted persistentvolumeclaim "grafana-dev" deleted diff --git a/tools/generate-dashboard-configmap-yaml.sh b/tools/generate-dashboard-configmap-yaml.sh index bf35c6d02..a2c17fc85 100755 --- a/tools/generate-dashboard-configmap-yaml.sh +++ b/tools/generate-dashboard-configmap-yaml.sh @@ -98,9 +98,9 @@ start() { fi # delete dashboard uid avoid conflict with old dashboard - dashboardJson=$(echo $dashboardJson | $PYTHON_CMD -c "import sys, json; d=json.load(sys.stdin);del d['uid'];print(json.dumps(d))") + dashboardJson=$(echo "$dashboardJson" | $PYTHON_CMD -c "import sys, json; d=json.load(sys.stdin);del d['uid'];print(json.dumps(d))") - if [ $dashboardFolderId -ne 0 ]; then + if [ -n "$dashboardFolderId" ] && [ "$dashboardFolderId" -ne 0 ]; then cat >$savePath/$dashboard_name.yaml < Date: Tue, 19 Nov 2024 11:28:01 +0100 Subject: [PATCH 140/150] Increase timeout of custom dashboard test (#267) Adding custom dashboard sometimes fail. The dashboard loader will try adding the dashboard quite a while until it hopefully works. Increase timeout to have a higher chance of success. Signed-off-by: Jacob Baungard Hansen --- tests/pkg/tests/observability_dashboard_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pkg/tests/observability_dashboard_test.go b/tests/pkg/tests/observability_dashboard_test.go index 84afbe55c..3453bbf76 100644 --- a/tests/pkg/tests/observability_dashboard_test.go +++ b/tests/pkg/tests/observability_dashboard_test.go @@ -44,7 +44,7 @@ var _ = Describe("", func() { Eventually(func() bool { _, result := utils.ContainDashboard(testOptions, dashboardTitle) return result - }, EventuallyTimeoutMinute*3, EventuallyIntervalSecond*5).Should(BeTrue()) + }, EventuallyTimeoutMinute*7, EventuallyIntervalSecond*5).Should(BeTrue()) }) It("RHACM4K-1669: Observability: Verify new customized Grafana dashboard - Should have update custom dashboard after configmap updated [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (dashboard/g0)", func() { From cc5c9687ca00d52bd53e45f2f2eaa4ac55853f6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?= Date: Tue, 19 Nov 2024 11:50:02 +0100 Subject: [PATCH 141/150] Add ACM obs team to OWNERS (#268) Signed-off-by: Jacob Baungard Hansen --- OWNERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/OWNERS b/OWNERS index 84398a5cc..fbe2b19e3 100644 --- a/OWNERS +++ b/OWNERS @@ -1,5 +1,12 @@ approvers: - quchangl-github +- subbarao-meduri +- saswatamcode +- philipgough +- coleenquadros +- thibaultmg +- moadz +- jacobbaungard reviewers: - haoqing0110 From 01e98ad3ef1845980a5bed377149e3c7cdbe695e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jacob=20Baung=C3=A5rd=20Hansen?= Date: Thu, 5 Dec 2024 09:41:20 +0100 Subject: [PATCH 142/150] Don't use custom dashboard in grafana-dev test (#271) This improves reliability while keeping same test coverage. 
Matches the change done here: https://github.com/stolostron/multicluster-observability-operator/pull/1646 Signed-off-by: Jacob Baungard Hansen --- tests/grafana-dev-test.sh | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tests/grafana-dev-test.sh b/tests/grafana-dev-test.sh index 4e612c816..33a892b67 100755 --- a/tests/grafana-dev-test.sh +++ b/tests/grafana-dev-test.sh @@ -7,9 +7,6 @@ base_dir="$(cd "$(dirname "$0")/.." ; pwd -P)" cd "$base_dir" obs_namespace=open-cluster-management-observability -# create a dashboard for test export grafana dashboard -kubectl apply -n "$obs_namespace" -f "$base_dir"/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml - # test deploy grafana-dev cd $base_dir/tools ./setup-grafana-dev.sh --deploy @@ -63,7 +60,7 @@ n=0 until [ "$n" -ge 10 ] do # test export grafana dashboard - ./generate-dashboard-configmap-yaml.sh "Sample Dashboard for E2E" + ./generate-dashboard-configmap-yaml.sh "ACM - Clusters Overview" if [ $? -eq 0 ]; then break fi @@ -81,6 +78,3 @@ if [ $? -ne 0 ]; then echo "Failed run setup-grafana-dev.sh --clean" exit 1 fi - -# clean test env -kubectl delete -n "$obs_namespace" -f "$base_dir"/examples/dashboards/sample_custom_dashboard/custom-sample-dashboard.yaml From 728cf455b97e7cfa73f05b92f0942edd496180bf Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 14:37:03 +0100 Subject: [PATCH 143/150] Various post-merge fixes Fixing initial issues after merge Signed-off-by: Jacob Baungard Hansen --- .../observability-e2e-test_suite_test.go | 2 + tests/pkg/tests/observability_addon_test.go | 20 ++-- tests/pkg/tests/observability_install_test.go | 2 +- .../tests/observability_manifestwork_test.go | 1 - tests/pkg/tests/observability_metrics_test.go | 97 +++++++++---------- tests/pkg/utils/mco_configmaps.go | 1 - tests/pkg/utils/mco_deploy.go | 12 +-- tests/pkg/utils/mco_grafana.go | 1 - tests/pkg/utils/mco_managedcluster.go | 2 - tests/pkg/utils/mco_metric.go | 8 -- 10 files changed, 58 insertions(+), 88 deletions(-) diff --git a/tests/pkg/tests/observability-e2e-test_suite_test.go b/tests/pkg/tests/observability-e2e-test_suite_test.go index 76ededd85..75a4c13bc 100644 --- a/tests/pkg/tests/observability-e2e-test_suite_test.go +++ b/tests/pkg/tests/observability-e2e-test_suite_test.go @@ -9,6 +9,7 @@ import ( "fmt" "math/rand" "os" + "strings" "testing" "time" @@ -145,6 +146,7 @@ var _ = BeforeSuite(func() { var _ = AfterSuite(func() { if !testFailed { uninstallMCO() + } }) func initVars() { diff --git a/tests/pkg/tests/observability_addon_test.go b/tests/pkg/tests/observability_addon_test.go index f5cc45ebc..398417b80 100644 --- a/tests/pkg/tests/observability_addon_test.go +++ b/tests/pkg/tests/observability_addon_test.go @@ -9,8 +9,6 @@ import ( "fmt" "strings" - "errors" - . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -80,16 +78,15 @@ var _ = Describe("Observability:", func() { } return nil }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(Succeed()) - + // TODO: do we need this, disabled in core-automation-repo Eventually(func() error { - err = utils.CheckAllOBADisabled(testOptions) - if err != nil { - return err - } - return nil - }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) - */ + err = utils.CheckAllOBADisabled(testOptions) + if err != nil { + return err + } + return nil + }, EventuallyTimeoutMinute*20, EventuallyIntervalSecond*5).Should(Succeed()) }) // it takes Prometheus 5m to notice a metric is not available - // https://github.com/prometheus/prometheus/issues/1810 @@ -147,6 +144,7 @@ var _ = Describe("Observability:", func() { return nil }, EventuallyTimeoutMinute*15, EventuallyIntervalSecond*5).Should(Succeed()) }) + }) It("RHACM4K-6923: Observability: Verify default scrap interval change to 5 minutes - [P2][Sev2][Observability][Stable]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (addon/g2)", func() { By("Check default interval value is 300") @@ -197,7 +195,7 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*5, EventuallyIntervalSecond*5).Should(BeTrue()) } }) - + Context("RHACM4K-7518: Observability: Disable the Observability by updating managed cluster label [P2][Sev2][Observability]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore (addon/g1) -", func() { It("[Stable] Modifying managedcluster cr to disable observability", func() { Eventually(func() error { diff --git a/tests/pkg/tests/observability_install_test.go b/tests/pkg/tests/observability_install_test.go index 2b65eea0d..95bbd5b93 100644 --- a/tests/pkg/tests/observability_install_test.go +++ b/tests/pkg/tests/observability_install_test.go @@ -36,7 +36,7 @@ func installMCO() { testOptions.HubCluster.ClusterServerURL, testOptions.KubeConfig, testOptions.HubCluster.KubeContext) - + // TODO (jacob): The test RHACM4K-30645 depends on the below. Should that test maybe be moved here? 
By("Deploy CM cluster-monitoring-config") diff --git a/tests/pkg/tests/observability_manifestwork_test.go b/tests/pkg/tests/observability_manifestwork_test.go index 636002a4d..ac7497a42 100644 --- a/tests/pkg/tests/observability_manifestwork_test.go +++ b/tests/pkg/tests/observability_manifestwork_test.go @@ -32,7 +32,6 @@ var _ = Describe("Observability:", func() { } }) - Context("[P2][Sev2][observability][Stable] Should be automatically created within 1 minute when delete manifestwork @ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release (manifestwork/g0) -", func() { manifestWorkName := "endpoint-observability-work" clientDynamic := utils.GetKubeClientDynamic(testOptions, true) diff --git a/tests/pkg/tests/observability_metrics_test.go b/tests/pkg/tests/observability_metrics_test.go index 550984ab9..deae1d758 100644 --- a/tests/pkg/tests/observability_metrics_test.go +++ b/tests/pkg/tests/observability_metrics_test.go @@ -50,35 +50,32 @@ var _ = Describe("Observability:", func() { }, EventuallyTimeoutMinute*6, EventuallyIntervalSecond*5).Should(Succeed()) }) - It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g1)", func() { - metricList := utils.GetDefaultMetricList(testOptions) - _, etcdPodList := utils.GetPodList( - testOptions, - true, - "openshift-etcd", - "app=etcd", - ) - // ignore etcd network peer metrics for SNO cluster - if etcdPodList != nil && len(etcdPodList.Items) <= 0 { - ignoreMetricMap["etcd_network_peer_received_bytes_total"] = true - ignoreMetricMap["etcd_network_peer_sent_bytes_total"] = true - } - for _, name := range metricList { - if _, ok := ignoredMetrics[name]; ok { - continue - } - - Eventually(func() error { - res, err := utils.QueryGrafana(testOptions, query) - if err != nil { - return err - } - if len(res.Data.Result) == 0 { - return fmt.Errorf("no data found for %s", query) - } - }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) - } - }) + // TODO (jacob): exact same as RHACM4K-3339?? 
+ // It("RHACM4K-1449 - Observability - Verify metrics data consistency [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g1)", func() { + // metricList := utils.GetDefaultMetricList(testOptions) + // _, etcdPodList := utils.GetPodList( + // testOptions, + // true, + // "openshift-etcd", + // "app=etcd", + // ) + // + // for _, name := range metricList { + // if _, ok := ignoredMetrics[name]; ok { + // continue + // } + // + // Eventually(func() error { + // res, err := utils.QueryGrafana(testOptions, query) + // if err != nil { + // return err + // } + // if len(res.Data.Result) == 0 { + // return fmt.Errorf("no data found for %s", query) + // } + // }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) + // } + // }) It("RHACM4K-1658: Observability: Customized metrics data are collected [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (metrics/g0)", func() { By("Adding custom metrics allowlist configmap") @@ -249,34 +246,30 @@ var _ = Describe("Observability:", func() { return nil }, EventuallyTimeoutMinute*1, EventuallyIntervalSecond*5).Should(Succeed()) + } + }) It("RHACM4K-3339: Observability: Verify recording rule - Should have metrics which used grafana dashboard [P2][Sev2][Observability][Integration]@ocpInterop @non-ui-post-restore @non-ui-post-release @non-ui-pre-upgrade @non-ui-post-upgrade @post-upgrade @post-restore @e2e @post-release @pre-upgrade (ssli/g1)", func() { - metricList := utils.GetDefaultMetricList(testOptions) - _, etcdPodList := utils.GetPodList( - testOptions, - true, - "openshift-etcd", - "app=etcd", - ) - // ignore etcd network peer metrics for SNO cluster - if etcdPodList != nil && len(etcdPodList.Items) <= 0 { - ignoreMetricMap["etcd_network_peer_received_bytes_total"] = true - ignoreMetricMap["etcd_network_peer_sent_bytes_total"] = true - } + metricList, _ := utils.GetDefaultMetricList(testOptions) + for _, name := range metricList { - _, ok := ignoreMetricMap[name] - if !ok { - Eventually(func() error { - res, err := utils.QueryGrafana(testOptions, query) - if err != nil { - return err - } - if len(res.Data.Result) == 0 { - return fmt.Errorf("no data found for %s", query) - } - }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) + if _, ok := ignoredMetrics[name]; ok { + continue } + + Eventually(func() error { + query := fmt.Sprintf("%s", name) + res, err := utils.QueryGrafana(testOptions, query) + + if err != nil { + return err + } + if len(res.Data.Result) == 0 { + return fmt.Errorf("no data found for %s", query) + } + return nil + }, EventuallyTimeoutMinute*2, EventuallyIntervalSecond*3).Should(Succeed()) } }) diff --git a/tests/pkg/utils/mco_configmaps.go b/tests/pkg/utils/mco_configmaps.go index 80a2e86cb..5ec376848 100644 --- a/tests/pkg/utils/mco_configmaps.go +++ b/tests/pkg/utils/mco_configmaps.go @@ -8,7 +8,6 @@ import ( "context" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" ) diff --git a/tests/pkg/utils/mco_deploy.go b/tests/pkg/utils/mco_deploy.go index a5d028593..a44dd8a41 100644 --- a/tests/pkg/utils/mco_deploy.go +++ b/tests/pkg/utils/mco_deploy.go @@ -6,16 +6,9 @@ package utils import ( "context" -<<<<<<< HEAD 
"encoding/json" "errors" "fmt" -======= - b64 "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" ->>>>>>> core-automation/main "os" "path/filepath" "reflect" @@ -27,10 +20,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/yaml" -<<<<<<< HEAD "k8s.io/client-go/dynamic" -======= ->>>>>>> core-automation/main "k8s.io/klog" ) @@ -468,7 +458,7 @@ func RevertMCOCRModification(opt TestOptions) error { return nil } -unc CheckMCOAddonResources(opt TestOptions) error { +func CheckMCOAddonResources(opt TestOptions) error { client := NewKubeClient( opt.HubCluster.ClusterServerURL, opt.KubeConfig, diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index fae63ec7a..3a864629d 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -5,7 +5,6 @@ package utils import ( - "fmt" "io/ioutil" "os" "strings" diff --git a/tests/pkg/utils/mco_managedcluster.go b/tests/pkg/utils/mco_managedcluster.go index 796203dba..ba1eb9ac2 100644 --- a/tests/pkg/utils/mco_managedcluster.go +++ b/tests/pkg/utils/mco_managedcluster.go @@ -5,10 +5,8 @@ package utils import ( - "context" "errors" - "fmt" "os" goversion "github.com/hashicorp/go-version" diff --git a/tests/pkg/utils/mco_metric.go b/tests/pkg/utils/mco_metric.go index dff82f25f..763671a08 100644 --- a/tests/pkg/utils/mco_metric.go +++ b/tests/pkg/utils/mco_metric.go @@ -5,18 +5,10 @@ package utils import ( -<<<<<<< HEAD "context" "crypto/tls" "fmt" "io" -======= - "bufio" - "context" - "crypto/tls" - "fmt" - "io/ioutil" ->>>>>>> core-automation/main "net/http" "net/url" "os" From f06a7d1c71c651cd4f401884164051f0d53b36b4 Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 14:50:54 +0100 Subject: [PATCH 144/150] Formatting Signed-off-by: Jacob Baungard Hansen --- execute_obs_interop_commands.sh | 62 +++++++++---------- tests/pkg/tests/observability_alert_test.go | 2 + tests/pkg/tests/observability_config_test.go | 1 - .../pkg/tests/observability_uninstall_test.go | 1 - 4 files changed, 33 insertions(+), 33 deletions(-) diff --git a/execute_obs_interop_commands.sh b/execute_obs_interop_commands.sh index 2042cb4f5..be17ed337 100644 --- a/execute_obs_interop_commands.sh +++ b/execute_obs_interop_commands.sh @@ -21,48 +21,48 @@ export SKIP_UNINSTALL_STEP=${SKIP_UNINSTALL_STEP:-'true'} export TAGGING=${TAGGING:-} if [[ -n ${PARAM_AWS_ACCESS_KEY_ID} ]]; then - export AWS_ACCESS_KEY_ID=${PARAM_AWS_ACCESS_KEY_ID} + export AWS_ACCESS_KEY_ID=${PARAM_AWS_ACCESS_KEY_ID} fi if [[ -n ${PARAM_AWS_SECRET_ACCESS_KEY} ]]; then - export AWS_SECRET_ACCESS_KEY=${PARAM_AWS_SECRET_ACCESS_KEY} + export AWS_SECRET_ACCESS_KEY=${PARAM_AWS_SECRET_ACCESS_KEY} fi # if [[ ${!USE_MINIO} == "false" ]]; then # export IS_CANARY_ENV=true -# fi +# fi export IS_CANARY_ENV=true if [[ -z ${HUB_CLUSTER_NAME} || -z ${BASE_DOMAIN} || -z ${OC_CLUSTER_USER} || -z ${OC_HUB_CLUSTER_PASS} || -z ${OC_HUB_CLUSTER_API_URL} ]]; then - echo "Aborting test.. OCP HUB details are required for the test execution" - exit 1 + echo "Aborting test.. 
OCP HUB details are required for the test execution" + exit 1 else - if [[ -n ${MANAGED_CLUSTER_USER} && -n ${MANAGED_CLUSTER_PASS} && -n ${MANAGED_CLUSTER_API_URL} ]]; then - oc login --insecure-skip-tls-verify -u $MANAGED_CLUSTER_USER -p $MANAGED_CLUSTER_PASS $MANAGED_CLUSTER_API_URL - oc config view --minify --raw=true > ~/.kube/managed_kubeconfig - export MAKUBECONFIG=~/.kube/managed_kubeconfig - fi - set +x - oc login --insecure-skip-tls-verify -u $OC_CLUSTER_USER -p $OC_HUB_CLUSTER_PASS $OC_HUB_CLUSTER_API_URL - set -x - - oc config view --minify --raw=true > userfile - //cat userfile - whoami - rm -rf ~/.kube/config - cp userfile ~/.kube/config - //cat ~/.kube/config - export KUBECONFIG=~/.kube/config + if [[ -n ${MANAGED_CLUSTER_USER} && -n ${MANAGED_CLUSTER_PASS} && -n ${MANAGED_CLUSTER_API_URL} ]]; then + oc login --insecure-skip-tls-verify -u $MANAGED_CLUSTER_USER -p $MANAGED_CLUSTER_PASS $MANAGED_CLUSTER_API_URL + oc config view --minify --raw=true >~/.kube/managed_kubeconfig + export MAKUBECONFIG=~/.kube/managed_kubeconfig + fi + set +x + oc login --insecure-skip-tls-verify -u $OC_CLUSTER_USER -p $OC_HUB_CLUSTER_PASS $OC_HUB_CLUSTER_API_URL + set -x - go mod vendor && ginkgo build ./tests/pkg/tests/ - cd tests - cp resources/options.yaml.template resources/options.yaml - /usr/local/bin/yq e -i '.options.hub.name="'"$HUB_CLUSTER_NAME"'"' resources/options.yaml - /usr/local/bin/yq e -i '.options.hub.baseDomain="'"$BASE_DOMAIN"'"' resources/options.yaml - /usr/local/bin/yq e -i '.options.clusters.name="'"$MANAGED_CLUSTER_NAME"'"' resources/options.yaml - /usr/local/bin/yq e -i '.options.clusters.baseDomain="'"$MANAGED_CLUSTER_BASE_DOMAIN"'"' resources/options.yaml - /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"$MAKUBECONFIG"'"' resources/options.yaml - cat resources/options.yaml - ginkgo --focus=$TAGGING -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 + oc config view --minify --raw=true >userfile + //cat userfile + whoami + rm -rf ~/.kube/config + cp userfile ~/.kube/config + //cat ~/.kube/config + export KUBECONFIG=~/.kube/config + + go mod vendor && ginkgo build ./tests/pkg/tests/ + cd tests + cp resources/options.yaml.template resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.name="'"$HUB_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.hub.baseDomain="'"$BASE_DOMAIN"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.name="'"$MANAGED_CLUSTER_NAME"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.baseDomain="'"$MANAGED_CLUSTER_BASE_DOMAIN"'"' resources/options.yaml + /usr/local/bin/yq e -i '.options.clusters.kubeconfig="'"$MAKUBECONFIG"'"' resources/options.yaml + cat resources/options.yaml + ginkgo --focus=$TAGGING -v pkg/tests/ -- -options=../../resources/options.yaml -v=5 fi diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index ed113a434..bd22345e2 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -11,9 +11,11 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "net/http" "net/url" "os" + "os/exec" "reflect" "sort" "strings" diff --git a/tests/pkg/tests/observability_config_test.go b/tests/pkg/tests/observability_config_test.go index 6a3146f27..e721997cf 100644 --- a/tests/pkg/tests/observability_config_test.go +++ b/tests/pkg/tests/observability_config_test.go @@ -7,7 +7,6 @@ package tests import ( "context" "fmt" - "os" "strings" "time" diff --git 
a/tests/pkg/tests/observability_uninstall_test.go b/tests/pkg/tests/observability_uninstall_test.go index 48925380e..0dc0f89b4 100644 --- a/tests/pkg/tests/observability_uninstall_test.go +++ b/tests/pkg/tests/observability_uninstall_test.go @@ -7,7 +7,6 @@ package tests import ( "context" "errors" - "fmt" "os" . "github.com/onsi/ginkgo" From 2081bafee8e3c9d51dff55006953f449617def52 Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 14:54:50 +0100 Subject: [PATCH 145/150] Don't use ioutils Signed-off-by: Jacob Baungard Hansen --- tests/pkg/tests/observability_alert_test.go | 3 +-- tests/pkg/utils/mco_grafana.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/pkg/tests/observability_alert_test.go b/tests/pkg/tests/observability_alert_test.go index bd22345e2..c591e8185 100644 --- a/tests/pkg/tests/observability_alert_test.go +++ b/tests/pkg/tests/observability_alert_test.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -595,7 +594,7 @@ var _ = Describe("Observability:", func() { return false } - alertResult, err := ioutil.ReadAll(resp.Body) + alertResult, err := io.ReadAll(resp.Body) if err != nil { return false } diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index 3a864629d..3104928d8 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -5,7 +5,6 @@ package utils import ( - "io/ioutil" "os" "strings" "time" @@ -41,7 +40,7 @@ func GetGrafanaURL(opt TestOptions) string { } } - data, err := ioutil.ReadFile(optionsFile) + data, err := os.ReadFile(optionsFile) if err != nil { klog.Errorf("--options error: %v", err) } From 4010d13e7ef3bf928e02db7486f6108e5ad4e8c3 Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 15:26:47 +0100 Subject: [PATCH 146/150] Cleanup of used vars Signed-off-by: Jacob Baungard Hansen --- tests/pkg/utils/mco_dashboard.go | 6 +++++- tests/pkg/utils/mco_grafana.go | 22 ---------------------- 2 files changed, 5 insertions(+), 23 deletions(-) diff --git a/tests/pkg/utils/mco_dashboard.go b/tests/pkg/utils/mco_dashboard.go index 1c81c6d42..2e60698ea 100644 --- a/tests/pkg/utils/mco_dashboard.go +++ b/tests/pkg/utils/mco_dashboard.go @@ -17,6 +17,10 @@ import ( "k8s.io/klog" ) +const ( + trueStr = "true" +) + func ContainDashboard(opt TestOptions, title string) (error, bool) { grafanaConsoleURL := GetGrafanaURL(opt) path := "/api/search?" @@ -30,7 +34,7 @@ func ContainDashboard(opt TestOptions, title string) (error, bool) { } client := &http.Client{} - if os.Getenv("IS_KIND_ENV") != "true" { + if os.Getenv("IS_KIND_ENV") != trueStr { tr := &http.Transport{ // #nosec G402 -- Used in test. 
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index 3104928d8..2db8f20e2 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -7,29 +7,19 @@ package utils import ( "os" "strings" - "time" "gopkg.in/yaml.v2" "k8s.io/klog" ) var ( - testHeadless bool BearerToken string - baseDomain string - kubeadminUser string - kubeadminCredential string - kubeconfig string - reportFile string optionsFile string - ownerPrefix, ocpRelease string testOptions TestOptions testOptionsContainer TestOptionsContainer - testUITimeout time.Duration - testFailed = false ) func GetGrafanaURL(opt TestOptions) string { @@ -52,18 +42,6 @@ func GetGrafanaURL(opt TestOptions) string { testOptions = testOptionsContainer.Options - // default Headless is `true` - // to disable, set Headless: false - // in options file - if testOptions.Headless == "" { - testHeadless = true - } else { - if testOptions.Headless == "false" { - testHeadless = false - } else { - testHeadless = true - } - } cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "rosa" substring2 := "hcp" From f0710aec0e51a429ccd0bb56b30fe3c7ae00e9d1 Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 15:44:10 +0100 Subject: [PATCH 147/150] Format, clean unused code Signed-off-by: Jacob Baungard Hansen --- tests/pkg/utils/mco_grafana.go | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index 2db8f20e2..f8b714685 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -8,40 +8,15 @@ import ( "os" "strings" - "gopkg.in/yaml.v2" - "k8s.io/klog" ) var ( - - BearerToken string - optionsFile string - - testOptions TestOptions - testOptionsContainer TestOptionsContainer + BearerToken string + optionsFile string ) func GetGrafanaURL(opt TestOptions) string { - if optionsFile == "" { - optionsFile = os.Getenv("OPTIONS") - if optionsFile == "" { - optionsFile = "resources/options.yaml" - } - } - - data, err := os.ReadFile(optionsFile) - if err != nil { - klog.Errorf("--options error: %v", err) - } - - err = yaml.Unmarshal([]byte(data), &testOptionsContainer) - if err != nil { - klog.Errorf("--options error: %v", err) - } - - testOptions = testOptionsContainer.Options - cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "rosa" substring2 := "hcp" From 0b5dc4c6c8435fd476ab7e95fc99045f9c760f4e Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 15:47:47 +0100 Subject: [PATCH 148/150] Actually format... 
Signed-off-by: Jacob Baungard Hansen --- tests/pkg/utils/mco_grafana.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index f8b714685..4f1449fec 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -7,13 +7,11 @@ package utils import ( "os" "strings" - ) var ( BearerToken string optionsFile string - ) func GetGrafanaURL(opt TestOptions) string { From d7c98bcced89d93b01dde4b3f5c3fbbfbbfa7316 Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 15:51:37 +0100 Subject: [PATCH 149/150] More unused code Signed-off-by: Jacob Baungard Hansen --- tests/pkg/utils/mco_grafana.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/pkg/utils/mco_grafana.go b/tests/pkg/utils/mco_grafana.go index 4f1449fec..19a1bab56 100644 --- a/tests/pkg/utils/mco_grafana.go +++ b/tests/pkg/utils/mco_grafana.go @@ -9,11 +9,6 @@ import ( "strings" ) -var ( - BearerToken string - optionsFile string -) - func GetGrafanaURL(opt TestOptions) string { cloudProvider := strings.ToLower(os.Getenv("CLOUD_PROVIDER")) substring1 := "rosa" From 39d46436b0883a8809853c79205b57384baa2956 Mon Sep 17 00:00:00 2001 From: Jacob Baungard Hansen Date: Thu, 19 Dec 2024 15:59:59 +0100 Subject: [PATCH 150/150] fix copyright Signed-off-by: Jacob Baungard Hansen --- tests/pkg/tests/observability_deployment_test.go | 3 ++- tests/pkg/utils/cluster_deploy.go | 3 ++- tests/pkg/utils/install_config.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/pkg/tests/observability_deployment_test.go b/tests/pkg/tests/observability_deployment_test.go index b955c0354..9e6a9b972 100644 --- a/tests/pkg/tests/observability_deployment_test.go +++ b/tests/pkg/tests/observability_deployment_test.go @@ -1,5 +1,6 @@ -// Copyright (c) 2021 Red Hat, Inc. +// Copyright (c) Red Hat, Inc. // Copyright Contributors to the Open Cluster Management project +// Licensed under the Apache License 2.0 package tests diff --git a/tests/pkg/utils/cluster_deploy.go b/tests/pkg/utils/cluster_deploy.go index b07075686..bed8e8206 100644 --- a/tests/pkg/utils/cluster_deploy.go +++ b/tests/pkg/utils/cluster_deploy.go @@ -1,5 +1,6 @@ -// Copyright (c) 2021 Red Hat, Inc. +// Copyright (c) Red Hat, Inc. // Copyright Contributors to the Open Cluster Management project +// Licensed under the Apache License 2.0 package utils diff --git a/tests/pkg/utils/install_config.go b/tests/pkg/utils/install_config.go index 70dad2f64..a146f05b4 100644 --- a/tests/pkg/utils/install_config.go +++ b/tests/pkg/utils/install_config.go @@ -1,5 +1,6 @@ -// Copyright (c) 2021 Red Hat, Inc. +// Copyright (c) Red Hat, Inc. // Copyright Contributors to the Open Cluster Management project +// Licensed under the Apache License 2.0 package utils