diff --git a/pkg/controllers/clusterinfo/capacity_controller.go b/pkg/controllers/clusterinfo/capacity_controller.go index 608798a0d..2d3553d02 100644 --- a/pkg/controllers/clusterinfo/capacity_controller.go +++ b/pkg/controllers/clusterinfo/capacity_controller.go @@ -6,6 +6,7 @@ import ( clusterinfov1beta1 "github.com/stolostron/cluster-lifecycle-api/clusterinfo/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -49,6 +50,12 @@ func (r *CapacityReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c if !cluster.GetDeletionTimestamp().IsZero() { return reconcile.Result{}, nil } + + if !meta.IsStatusConditionTrue(cluster.Status.Conditions, clusterv1.ManagedClusterConditionAvailable) { + // only update the capacity when cluster is available + return reconcile.Result{}, nil + } + capacity := cluster.DeepCopy().Status.Capacity if capacity == nil { capacity = clusterv1.ResourceList{} diff --git a/pkg/controllers/clusterinfo/capacity_controller_test.go b/pkg/controllers/clusterinfo/capacity_controller_test.go index 173ffe972..a17c9e2e3 100644 --- a/pkg/controllers/clusterinfo/capacity_controller_test.go +++ b/pkg/controllers/clusterinfo/capacity_controller_test.go @@ -103,6 +103,12 @@ func newCluster(name string, resources map[clusterv1.ResourceName]int64) *cluste Name: name, }, Status: clusterv1.ManagedClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1.ManagedClusterConditionAvailable, + Status: metav1.ConditionTrue, + }, + }, Capacity: newCapacity(resources), }, } diff --git a/pkg/controllers/clusterinfo/clusterinfo_controller.go b/pkg/controllers/clusterinfo/clusterinfo_controller.go index bbadd28c9..2d08797ec 100644 --- a/pkg/controllers/clusterinfo/clusterinfo_controller.go +++ b/pkg/controllers/clusterinfo/clusterinfo_controller.go @@ -61,13 +61,13 
@@ func SetupWithManager(mgr manager.Manager, logCertSecret string) error { klog.Warning("The log cert secret is not specified, ignore it") } - if err = add(mgr, newClusterInfoReconciler(mgr)); err != nil { + if err = add("clusterinfo-controller", mgr, newClusterInfoReconciler(mgr)); err != nil { return err } - if err = add(mgr, newAutoDetectReconciler(mgr)); err != nil { + if err = add("clusterdetector-controller", mgr, newAutoDetectReconciler(mgr)); err != nil { return err } - if err = add(mgr, newCapacityReconciler(mgr)); err != nil { + if err = add("clustercapacity-controller", mgr, newCapacityReconciler(mgr)); err != nil { return err } return nil @@ -82,9 +82,9 @@ func newClusterInfoReconciler(mgr manager.Manager) reconcile.Reconciler { } // add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { +func add(name string, mgr manager.Manager, r reconcile.Reconciler) error { // Create a new controller - c, err := controller.New("clusterinfo-controller", mgr, controller.Options{Reconciler: r}) + c, err := controller.New(name, mgr, controller.Options{Reconciler: r}) if err != nil { return err } @@ -150,16 +150,18 @@ func (r *ClusterInfoReconciler) Reconcile(ctx context.Context, req ctrl.Request) return reconcile.Result{}, err } - cluster.ObjectMeta.Finalizers = utils.RemoveString(cluster.ObjectMeta.Finalizers, clusterFinalizerName) - return reconcile.Result{}, r.client.Update(context.TODO(), cluster) + patch := client.MergeFrom(cluster.DeepCopy()) + cluster.Finalizers = utils.RemoveString(cluster.Finalizers, clusterFinalizerName) + return reconcile.Result{}, r.client.Patch(ctx, cluster, patch) } return reconcile.Result{}, nil } if !utils.ContainsString(cluster.GetFinalizers(), clusterFinalizerName) { - cluster.ObjectMeta.Finalizers = append(cluster.ObjectMeta.Finalizers, clusterFinalizerName) - return reconcile.Result{}, r.client.Update(context.TODO(), cluster) + patch := 
client.MergeFrom(cluster.DeepCopy()) + cluster.Finalizers = append(cluster.Finalizers, clusterFinalizerName) + return reconcile.Result{}, r.client.Patch(ctx, cluster, patch) } clusterInfo := &clusterinfov1beta1.ManagedClusterInfo{} diff --git a/test/e2e/clusterview_test.go b/test/e2e/clusterview_test.go index 441eba308..ee74dfdec 100644 --- a/test/e2e/clusterview_test.go +++ b/test/e2e/clusterview_test.go @@ -41,7 +41,7 @@ func validateClusterView(UserDynamicClient dynamic.Interface, ViewGVR, resourceG } if len(resourceList) != len(expectedNames) { - return fmt.Errorf("validateClusterView: reources count %v != expected count %v", len(resourceList), len(expectedNames)) + return fmt.Errorf("validateClusterView: resources count %v != expected count %v, resources: %+v", len(resourceList), len(expectedNames), resourceList) } for _, item := range resourceList { name, _, err := unstructured.NestedString(item.Object, "metadata", "name") @@ -168,15 +168,14 @@ var _ = ginkgo.Describe("Testing ClusterView to get managedClusters", func() { err = util.CleanManagedCluster(clusterClient, cluster3) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - clusterInRole := []string{cluster1, cluster3} + expectedClusters = []string{cluster1} rules = []rbacv1.PolicyRule{ helpers.NewRule("list", "watch").Groups("clusterview.open-cluster-management.io").Resources("managedclusters").RuleOrDie(), - helpers.NewRule("list", "get").Groups("cluster.open-cluster-management.io").Resources("managedclusters").Names(clusterInRole...).RuleOrDie(), + helpers.NewRule("list", "get").Groups("cluster.open-cluster-management.io").Resources("managedclusters").Names(expectedClusters...).RuleOrDie(), } err = util.UpdateClusterRole(kubeClient, clusterRoleName, rules) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - expectedClusters = []string{cluster1} gomega.Eventually(func() error { return validateClusterView(userDynamicClient, managedClusterViewGVR, util.ManagedClusterGVR, expectedClusters)