From c20e3b55fad4583a597cf02f7ba0b98498e76b02 Mon Sep 17 00:00:00 2001 From: Tamal Saha Date: Sat, 4 May 2024 10:37:29 +0600 Subject: [PATCH] Update deps Signed-off-by: Tamal Saha --- go.mod | 14 +- go.sum | 24 +- vendor/helm.sh/helm/v3/pkg/release/hook.go | 106 --- vendor/helm.sh/helm/v3/pkg/release/info.go | 40 -- vendor/helm.sh/helm/v3/pkg/release/mock.go | 116 ---- vendor/helm.sh/helm/v3/pkg/release/release.go | 49 -- .../helm.sh/helm/v3/pkg/release/responses.go | 24 - vendor/helm.sh/helm/v3/pkg/release/status.go | 49 -- .../helm.sh/helm/v3/pkg/releaseutil/filter.go | 78 --- .../helm/v3/pkg/releaseutil/kind_sorter.go | 160 ----- .../helm/v3/pkg/releaseutil/manifest.go | 72 -- .../v3/pkg/releaseutil/manifest_sorter.go | 233 ------- .../helm.sh/helm/v3/pkg/releaseutil/sorter.go | 78 --- vendor/helm.sh/helm/v3/pkg/time/time.go | 91 --- vendor/modules.txt | 26 +- .../pkg/addonfactory/addonfactory.go | 44 +- .../pkg/addonfactory/helm_agentaddon.go | 130 +--- .../pkg/addonfactory/template_agentaddon.go | 3 +- .../pkg/addonmanager/constants/constants.go | 8 +- .../controllers/addoninstall/controller.go | 138 ++++ .../controllers/agentdeploy/controller.go | 204 +++--- .../agentdeploy/default_hook_sync.go | 8 +- .../controllers/agentdeploy/default_sync.go | 6 +- .../agentdeploy/hosted_hook_sync.go | 10 +- .../controllers/agentdeploy/hosted_sync.go | 10 +- .../controllers/agentdeploy/utils.go | 20 +- .../controllers/certificate/csrsign.go | 2 +- .../controller.go | 33 +- .../controller.go | 20 +- .../pkg/addonmanager/manager.go | 45 +- .../addon-framework/pkg/agent/inteface.go | 81 ++- .../addon_configuration_reconciler.go | 114 ++++ .../addonconfiguration/controller.go | 196 ++++++ .../controllers/addonconfiguration/graph.go | 417 ++++++++++++ .../mgmt_addon_progressing_reconciler.go | 143 ++++ .../controllers/addonowner/controller.go | 100 +++ .../pkg/apis/cluster/v1alpha1/rollout.go | 616 ++++++++++++++++++ .../pkg/apis/cluster/v1beta1/placement.go | 273 ++++++++ 
.../pkg/apis/work/v1/applier/workapplier.go | 2 +- .../controller-runtime/pkg/cache/cache.go | 42 +- .../pkg/client/apiutil/restmapper.go | 65 +- .../pkg/manager/internal.go | 2 + .../pkg/manager/runnable_group.go | 15 +- 43 files changed, 2434 insertions(+), 1473 deletions(-) delete mode 100644 vendor/helm.sh/helm/v3/pkg/release/hook.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/release/info.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/release/mock.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/release/release.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/release/responses.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/release/status.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go delete mode 100644 vendor/helm.sh/helm/v3/pkg/time/time.go create mode 100644 vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go rename vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/{cmamanagedby => managementaddon}/controller.go (75%) rename vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/{cmaconfig => managementaddonconfig}/controller.go (89%) create mode 100644 vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go create mode 100644 vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go create mode 100644 vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go create mode 100644 
vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go create mode 100644 vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/rollout.go create mode 100644 vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1/placement.go diff --git a/go.mod b/go.mod index 32d92d85..4cf0ccf8 100644 --- a/go.mod +++ b/go.mod @@ -12,13 +12,13 @@ require ( k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 k8s.io/klog/v2 v2.120.1 - k8s.io/utils v0.0.0-20240102154912-e7106e64919e + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 kmodules.xyz/client-go v0.29.13 - open-cluster-management.io/addon-framework v0.9.1-0.20240402013859-be542a6d0a9c + open-cluster-management.io/addon-framework v0.9.2 open-cluster-management.io/api v0.13.0 open-cluster-management.io/managed-serviceaccount v0.5.0 - open-cluster-management.io/sdk-go v0.13.0 - sigs.k8s.io/controller-runtime v0.17.2 + open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379 + sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/yaml v1.4.0 ) @@ -103,7 +103,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect @@ -123,7 +123,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - helm.sh/helm/v3 v3.14.2 // indirect + helm.sh/helm/v3 v3.14.4 // indirect k8s.io/apiextensions-apiserver v0.29.2 // indirect k8s.io/apiserver v0.29.2 // indirect k8s.io/cli-runtime v0.29.2 // indirect @@ -137,7 +137,7 @@ require ( replace 
github.com/Masterminds/sprig/v3 => github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8 -replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.16.1-0.20240128092212-43c4e15c56b1 +replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.17.4-0.20240410011645-f1b2f533ea66 replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.6 diff --git a/go.sum b/go.sum index 28c10973..b6ce0e89 100644 --- a/go.sum +++ b/go.sum @@ -218,8 +218,8 @@ github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBF github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kmodules/apiserver v0.29.1-0.20240104121741-1fb217d4a573 h1:6v7bTFGH/Ha1idq1sLX9px2KJhcx6cpuMowuYRyCht4= github.com/kmodules/apiserver v0.29.1-0.20240104121741-1fb217d4a573/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM= -github.com/kmodules/controller-runtime v0.16.1-0.20240128092212-43c4e15c56b1 h1:Pq/2P0Wp7HANIM6vd6YqQxZTEqlqifAzfw1Bz/CR0zo= -github.com/kmodules/controller-runtime v0.16.1-0.20240128092212-43c4e15c56b1/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +github.com/kmodules/controller-runtime v0.17.4-0.20240410011645-f1b2f533ea66 h1:qutDfX9//kt8JiEM+blqRWTwE6Qt7vho0GY6oZfckmI= +github.com/kmodules/controller-runtime v0.17.4-0.20240410011645-f1b2f533ea66/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -459,8 +459,8 @@ golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -624,8 +624,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -helm.sh/helm/v3 v3.14.2 h1:V71fv+NGZv0icBlr+in1MJXuUIHCiPG1hW9gEBISTIA= -helm.sh/helm/v3 v3.14.2/go.mod h1:2itvvDv2WSZXTllknfQo6j7u3VVgMAvm8POCDgYH424= +helm.sh/helm/v3 v3.14.4 h1:6FSpEfqyDalHq3kUr4gOMThhgY55kXUEjdQoyODYnrM= +helm.sh/helm/v3 v3.14.4/go.mod h1:Tje7LL4gprZpuBNTbG34d1Xn5NmRT3OWfBRwpOSer9I= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -648,18 +648,18 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kmodules.xyz/client-go v0.29.13 h1:BnSVgcTQgiuTCASgL7Hr8i6mrelAy0PhhtaTUYEyUdc= kmodules.xyz/client-go v0.29.13/go.mod h1:yfJSSwYYBX/60165BsRx8RiQsYu2NzvBC+zRwviAICQ= -open-cluster-management.io/addon-framework v0.9.1-0.20240402013859-be542a6d0a9c h1:0CZbptuATWUgJXFHEkfEk2/88POzrbnhhjMi3/39FQ4= -open-cluster-management.io/addon-framework v0.9.1-0.20240402013859-be542a6d0a9c/go.mod h1:nQMHHshMfMNj4qdwg/4oMqRf42FQU6EYy68o2HsLgn4= +open-cluster-management.io/addon-framework v0.9.2 h1:oQnk6Y6433Fvi/MC8sWoy68lHzkqPsFLj7IEx07kFfU= +open-cluster-management.io/addon-framework v0.9.2/go.mod h1:LDkGLGTQh+sthF1qWlv87iMeAuRPsNEMK31O14kMneA= open-cluster-management.io/api v0.13.0 h1:dlcJEZlNlE0DmSDctK2s7iWKg9l+Tgb0V78Z040nMuk= open-cluster-management.io/api v0.13.0/go.mod h1:CuCPEzXDvOyxBB0H1d1eSeajbHqaeGEKq9c63vQc63w= open-cluster-management.io/managed-serviceaccount v0.5.0 h1:yEFTlwPp3tecorzHLwa71mIJzwQtxcwRUnUhRF4OX0U= open-cluster-management.io/managed-serviceaccount v0.5.0/go.mod h1:pvSKkwFynokhtV7ksN1z0BNWQ37bG8FudOfmjn55ciA= -open-cluster-management.io/sdk-go v0.13.0 h1:ddMGsPUekQr9z03tVN6vF39Uf+WEKMtGU/xSd81HdoA= -open-cluster-management.io/sdk-go v0.13.0/go.mod h1:UnsjzYOrDTF9a8rHEXksoIAtAdO1o5CD5Jtaw6T5B9w= +open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379 h1:8jXVHfgy+wgXq1mrWC1mTieoP77WsAAHNpzILMIzWB0= +open-cluster-management.io/sdk-go 
v0.13.1-0.20240416030555-aa744f426379/go.mod h1:w2OaxtCyegxeyFLU42UQ3oxUz01QdsBQkcHI17T/l48= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= diff --git a/vendor/helm.sh/helm/v3/pkg/release/hook.go b/vendor/helm.sh/helm/v3/pkg/release/hook.go deleted file mode 100644 index cb995558..00000000 --- a/vendor/helm.sh/helm/v3/pkg/release/hook.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package release - -import ( - "helm.sh/helm/v3/pkg/time" -) - -// HookEvent specifies the hook event -type HookEvent string - -// Hook event types -const ( - HookPreInstall HookEvent = "pre-install" - HookPostInstall HookEvent = "post-install" - HookPreDelete HookEvent = "pre-delete" - HookPostDelete HookEvent = "post-delete" - HookPreUpgrade HookEvent = "pre-upgrade" - HookPostUpgrade HookEvent = "post-upgrade" - HookPreRollback HookEvent = "pre-rollback" - HookPostRollback HookEvent = "post-rollback" - HookTest HookEvent = "test" -) - -func (x HookEvent) String() string { return string(x) } - -// HookDeletePolicy specifies the hook delete policy -type HookDeletePolicy string - -// Hook delete policy types -const ( - HookSucceeded HookDeletePolicy = "hook-succeeded" - HookFailed HookDeletePolicy = "hook-failed" - HookBeforeHookCreation HookDeletePolicy = "before-hook-creation" -) - -func (x HookDeletePolicy) String() string { return string(x) } - -// HookAnnotation is the label name for a hook -const HookAnnotation = "helm.sh/hook" - -// HookWeightAnnotation is the label name for a hook weight -const HookWeightAnnotation = "helm.sh/hook-weight" - -// HookDeleteAnnotation is the label name for the delete policy for a hook -const HookDeleteAnnotation = "helm.sh/hook-delete-policy" - -// Hook defines a hook object. -type Hook struct { - Name string `json:"name,omitempty"` - // Kind is the Kubernetes kind. - Kind string `json:"kind,omitempty"` - // Path is the chart-relative path to the template. - Path string `json:"path,omitempty"` - // Manifest is the manifest contents. - Manifest string `json:"manifest,omitempty"` - // Events are the events that this hook fires on. - Events []HookEvent `json:"events,omitempty"` - // LastRun indicates the date/time this was last run. 
- LastRun HookExecution `json:"last_run,omitempty"` - // Weight indicates the sort order for execution among similar Hook type - Weight int `json:"weight,omitempty"` - // DeletePolicies are the policies that indicate when to delete the hook - DeletePolicies []HookDeletePolicy `json:"delete_policies,omitempty"` -} - -// A HookExecution records the result for the last execution of a hook for a given release. -type HookExecution struct { - // StartedAt indicates the date/time this hook was started - StartedAt time.Time `json:"started_at,omitempty"` - // CompletedAt indicates the date/time this hook was completed. - CompletedAt time.Time `json:"completed_at,omitempty"` - // Phase indicates whether the hook completed successfully - Phase HookPhase `json:"phase"` -} - -// A HookPhase indicates the state of a hook execution -type HookPhase string - -const ( - // HookPhaseUnknown indicates that a hook is in an unknown state - HookPhaseUnknown HookPhase = "Unknown" - // HookPhaseRunning indicates that a hook is currently executing - HookPhaseRunning HookPhase = "Running" - // HookPhaseSucceeded indicates that hook execution succeeded - HookPhaseSucceeded HookPhase = "Succeeded" - // HookPhaseFailed indicates that hook execution failed - HookPhaseFailed HookPhase = "Failed" -) - -// String converts a hook phase to a printable string -func (x HookPhase) String() string { return string(x) } diff --git a/vendor/helm.sh/helm/v3/pkg/release/info.go b/vendor/helm.sh/helm/v3/pkg/release/info.go deleted file mode 100644 index b030a8a5..00000000 --- a/vendor/helm.sh/helm/v3/pkg/release/info.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package release - -import ( - "k8s.io/apimachinery/pkg/runtime" - - "helm.sh/helm/v3/pkg/time" -) - -// Info describes release information. -type Info struct { - // FirstDeployed is when the release was first deployed. - FirstDeployed time.Time `json:"first_deployed,omitempty"` - // LastDeployed is when the release was last deployed. - LastDeployed time.Time `json:"last_deployed,omitempty"` - // Deleted tracks when this object was deleted. - Deleted time.Time `json:"deleted"` - // Description is human-friendly "log entry" about this release. - Description string `json:"description,omitempty"` - // Status is the current state of the release - Status Status `json:"status,omitempty"` - // Contains the rendered templates/NOTES.txt if available - Notes string `json:"notes,omitempty"` - // Contains the deployed resources information - Resources map[string][]runtime.Object `json:"resources,omitempty"` -} diff --git a/vendor/helm.sh/helm/v3/pkg/release/mock.go b/vendor/helm.sh/helm/v3/pkg/release/mock.go deleted file mode 100644 index a28e1dc1..00000000 --- a/vendor/helm.sh/helm/v3/pkg/release/mock.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package release - -import ( - "fmt" - "math/rand" - - "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/time" -) - -// MockHookTemplate is the hook template used for all mock release objects. -var MockHookTemplate = `apiVersion: v1 -kind: Job -metadata: - annotations: - "helm.sh/hook": pre-install -` - -// MockManifest is the manifest used for all mock release objects. -var MockManifest = `apiVersion: v1 -kind: Secret -metadata: - name: fixture -` - -// MockReleaseOptions allows for user-configurable options on mock release objects. -type MockReleaseOptions struct { - Name string - Version int - Chart *chart.Chart - Status Status - Namespace string -} - -// Mock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing. 
-func Mock(opts *MockReleaseOptions) *Release { - date := time.Unix(242085845, 0).UTC() - - name := opts.Name - if name == "" { - name = "testrelease-" + fmt.Sprint(rand.Intn(100)) - } - - version := 1 - if opts.Version != 0 { - version = opts.Version - } - - namespace := opts.Namespace - if namespace == "" { - namespace = "default" - } - - ch := opts.Chart - if opts.Chart == nil { - ch = &chart.Chart{ - Metadata: &chart.Metadata{ - Name: "foo", - Version: "0.1.0-beta.1", - AppVersion: "1.0", - }, - Templates: []*chart.File{ - {Name: "templates/foo.tpl", Data: []byte(MockManifest)}, - }, - } - } - - scode := StatusDeployed - if len(opts.Status) > 0 { - scode = opts.Status - } - - info := &Info{ - FirstDeployed: date, - LastDeployed: date, - Status: scode, - Description: "Release mock", - Notes: "Some mock release notes!", - } - - return &Release{ - Name: name, - Info: info, - Chart: ch, - Config: map[string]interface{}{"name": "value"}, - Version: version, - Namespace: namespace, - Hooks: []*Hook{ - { - Name: "pre-install-hook", - Kind: "Job", - Path: "pre-install-hook.yaml", - Manifest: MockHookTemplate, - LastRun: HookExecution{}, - Events: []HookEvent{HookPreInstall}, - }, - }, - Manifest: MockManifest, - } -} diff --git a/vendor/helm.sh/helm/v3/pkg/release/release.go b/vendor/helm.sh/helm/v3/pkg/release/release.go deleted file mode 100644 index b9061287..00000000 --- a/vendor/helm.sh/helm/v3/pkg/release/release.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package release - -import "helm.sh/helm/v3/pkg/chart" - -// Release describes a deployment of a chart, together with the chart -// and the variables used to deploy that chart. -type Release struct { - // Name is the name of the release - Name string `json:"name,omitempty"` - // Info provides information about a release - Info *Info `json:"info,omitempty"` - // Chart is the chart that was released. - Chart *chart.Chart `json:"chart,omitempty"` - // Config is the set of extra Values added to the chart. - // These values override the default values inside of the chart. - Config map[string]interface{} `json:"config,omitempty"` - // Manifest is the string representation of the rendered template. - Manifest string `json:"manifest,omitempty"` - // Hooks are all of the hooks declared for this release. - Hooks []*Hook `json:"hooks,omitempty"` - // Version is an int which represents the revision of the release. - Version int `json:"version,omitempty"` - // Namespace is the kubernetes namespace of the release. - Namespace string `json:"namespace,omitempty"` - // Labels of the release. - // Disabled encoding into Json cause labels are stored in storage driver metadata field. - Labels map[string]string `json:"-"` -} - -// SetStatus is a helper for setting the status on a release. -func (r *Release) SetStatus(status Status, msg string) { - r.Info.Status = status - r.Info.Description = msg -} diff --git a/vendor/helm.sh/helm/v3/pkg/release/responses.go b/vendor/helm.sh/helm/v3/pkg/release/responses.go deleted file mode 100644 index 7ee1fc2e..00000000 --- a/vendor/helm.sh/helm/v3/pkg/release/responses.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package release - -// UninstallReleaseResponse represents a successful response to an uninstall request. -type UninstallReleaseResponse struct { - // Release is the release that was marked deleted. - Release *Release `json:"release,omitempty"` - // Info is an uninstall message - Info string `json:"info,omitempty"` -} diff --git a/vendor/helm.sh/helm/v3/pkg/release/status.go b/vendor/helm.sh/helm/v3/pkg/release/status.go deleted file mode 100644 index e0e3ed62..00000000 --- a/vendor/helm.sh/helm/v3/pkg/release/status.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright The Helm Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package release - -// Status is the status of a release -type Status string - -// Describe the status of a release -// NOTE: Make sure to update cmd/helm/status.go when adding or modifying any of these statuses. -const ( - // StatusUnknown indicates that a release is in an uncertain state. - StatusUnknown Status = "unknown" - // StatusDeployed indicates that the release has been pushed to Kubernetes. 
- StatusDeployed Status = "deployed" - // StatusUninstalled indicates that a release has been uninstalled from Kubernetes. - StatusUninstalled Status = "uninstalled" - // StatusSuperseded indicates that this release object is outdated and a newer one exists. - StatusSuperseded Status = "superseded" - // StatusFailed indicates that the release was not successfully deployed. - StatusFailed Status = "failed" - // StatusUninstalling indicates that a uninstall operation is underway. - StatusUninstalling Status = "uninstalling" - // StatusPendingInstall indicates that an install operation is underway. - StatusPendingInstall Status = "pending-install" - // StatusPendingUpgrade indicates that an upgrade operation is underway. - StatusPendingUpgrade Status = "pending-upgrade" - // StatusPendingRollback indicates that an rollback operation is underway. - StatusPendingRollback Status = "pending-rollback" -) - -func (x Status) String() string { return string(x) } - -// IsPending determines if this status is a state or a transition. -func (x Status) IsPending() bool { - return x == StatusPendingInstall || x == StatusPendingUpgrade || x == StatusPendingRollback -} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go deleted file mode 100644 index dbd0df8e..00000000 --- a/vendor/helm.sh/helm/v3/pkg/releaseutil/filter.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil" - -import rspb "helm.sh/helm/v3/pkg/release" - -// FilterFunc returns true if the release object satisfies -// the predicate of the underlying filter func. -type FilterFunc func(*rspb.Release) bool - -// Check applies the FilterFunc to the release object. -func (fn FilterFunc) Check(rls *rspb.Release) bool { - if rls == nil { - return false - } - return fn(rls) -} - -// Filter applies the filter(s) to the list of provided releases -// returning the list that satisfies the filtering predicate. -func (fn FilterFunc) Filter(rels []*rspb.Release) (rets []*rspb.Release) { - for _, rel := range rels { - if fn.Check(rel) { - rets = append(rets, rel) - } - } - return -} - -// Any returns a FilterFunc that filters a list of releases -// determined by the predicate 'f0 || f1 || ... || fn'. -func Any(filters ...FilterFunc) FilterFunc { - return func(rls *rspb.Release) bool { - for _, filter := range filters { - if filter(rls) { - return true - } - } - return false - } -} - -// All returns a FilterFunc that filters a list of releases -// determined by the predicate 'f0 && f1 && ... && fn'. -func All(filters ...FilterFunc) FilterFunc { - return func(rls *rspb.Release) bool { - for _, filter := range filters { - if !filter(rls) { - return false - } - } - return true - } -} - -// StatusFilter filters a set of releases by status code. -func StatusFilter(status rspb.Status) FilterFunc { - return FilterFunc(func(rls *rspb.Release) bool { - if rls == nil { - return true - } - return rls.Info.Status == status - }) -} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go deleted file mode 100644 index bb8e84dd..00000000 --- a/vendor/helm.sh/helm/v3/pkg/releaseutil/kind_sorter.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright The Helm Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package releaseutil - -import ( - "sort" - - "helm.sh/helm/v3/pkg/release" -) - -// KindSortOrder is an ordering of Kinds. -type KindSortOrder []string - -// InstallOrder is the order in which manifests should be installed (by Kind). -// -// Those occurring earlier in the list get installed before those occurring later in the list. -var InstallOrder KindSortOrder = []string{ - "PriorityClass", - "Namespace", - "NetworkPolicy", - "ResourceQuota", - "LimitRange", - "PodSecurityPolicy", - "PodDisruptionBudget", - "ServiceAccount", - "Secret", - "SecretList", - "ConfigMap", - "StorageClass", - "PersistentVolume", - "PersistentVolumeClaim", - "CustomResourceDefinition", - "ClusterRole", - "ClusterRoleList", - "ClusterRoleBinding", - "ClusterRoleBindingList", - "Role", - "RoleList", - "RoleBinding", - "RoleBindingList", - "Service", - "DaemonSet", - "Pod", - "ReplicationController", - "ReplicaSet", - "Deployment", - "HorizontalPodAutoscaler", - "StatefulSet", - "Job", - "CronJob", - "IngressClass", - "Ingress", - "APIService", -} - -// UninstallOrder is the order in which manifests should be uninstalled (by Kind). -// -// Those occurring earlier in the list get uninstalled before those occurring later in the list. 
-var UninstallOrder KindSortOrder = []string{ - "APIService", - "Ingress", - "IngressClass", - "Service", - "CronJob", - "Job", - "StatefulSet", - "HorizontalPodAutoscaler", - "Deployment", - "ReplicaSet", - "ReplicationController", - "Pod", - "DaemonSet", - "RoleBindingList", - "RoleBinding", - "RoleList", - "Role", - "ClusterRoleBindingList", - "ClusterRoleBinding", - "ClusterRoleList", - "ClusterRole", - "CustomResourceDefinition", - "PersistentVolumeClaim", - "PersistentVolume", - "StorageClass", - "ConfigMap", - "SecretList", - "Secret", - "ServiceAccount", - "PodDisruptionBudget", - "PodSecurityPolicy", - "LimitRange", - "ResourceQuota", - "NetworkPolicy", - "Namespace", - "PriorityClass", -} - -// sort manifests by kind. -// -// Results are sorted by 'ordering', keeping order of items with equal kind/priority -func sortManifestsByKind(manifests []Manifest, ordering KindSortOrder) []Manifest { - sort.SliceStable(manifests, func(i, j int) bool { - return lessByKind(manifests[i], manifests[j], manifests[i].Head.Kind, manifests[j].Head.Kind, ordering) - }) - - return manifests -} - -// sort hooks by kind, using an out-of-place sort to preserve the input parameters. 
-// -// Results are sorted by 'ordering', keeping order of items with equal kind/priority -func sortHooksByKind(hooks []*release.Hook, ordering KindSortOrder) []*release.Hook { - h := hooks - sort.SliceStable(h, func(i, j int) bool { - return lessByKind(h[i], h[j], h[i].Kind, h[j].Kind, ordering) - }) - - return h -} - -func lessByKind(_ interface{}, _ interface{}, kindA string, kindB string, o KindSortOrder) bool { - ordering := make(map[string]int, len(o)) - for v, k := range o { - ordering[k] = v - } - - first, aok := ordering[kindA] - second, bok := ordering[kindB] - - if !aok && !bok { - // if both are unknown then sort alphabetically by kind, keep original order if same kind - if kindA != kindB { - return kindA < kindB - } - return first < second - } - // unknown kind is last - if !aok { - return false - } - if !bok { - return true - } - // sort different kinds, keep original order if same priority - return first < second -} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go deleted file mode 100644 index 0b04a459..00000000 --- a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package releaseutil - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// SimpleHead defines what the structure of the head of a manifest file -type SimpleHead struct { - Version string `json:"apiVersion"` - Kind string `json:"kind,omitempty"` - Metadata *struct { - Name string `json:"name"` - Annotations map[string]string `json:"annotations"` - } `json:"metadata,omitempty"` -} - -var sep = regexp.MustCompile("(?:^|\\s*\n)---\\s*") - -// SplitManifests takes a string of manifest and returns a map contains individual manifests -func SplitManifests(bigFile string) map[string]string { - // Basically, we're quickly splitting a stream of YAML documents into an - // array of YAML docs. The file name is just a place holder, but should be - // integer-sortable so that manifests get output in the same order as the - // input (see `BySplitManifestsOrder`). - tpl := "manifest-%d" - res := map[string]string{} - // Making sure that any extra whitespace in YAML stream doesn't interfere in splitting documents correctly. 
- bigFileTmp := strings.TrimSpace(bigFile) - docs := sep.Split(bigFileTmp, -1) - var count int - for _, d := range docs { - if d == "" { - continue - } - - d = strings.TrimSpace(d) - res[fmt.Sprintf(tpl, count)] = d - count = count + 1 - } - return res -} - -// BySplitManifestsOrder sorts by in-file manifest order, as provided in function `SplitManifests` -type BySplitManifestsOrder []string - -func (a BySplitManifestsOrder) Len() int { return len(a) } -func (a BySplitManifestsOrder) Less(i, j int) bool { - // Split `manifest-%d` - anum, _ := strconv.ParseInt(a[i][len("manifest-"):], 10, 0) - bnum, _ := strconv.ParseInt(a[j][len("manifest-"):], 10, 0) - return anum < bnum -} -func (a BySplitManifestsOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go deleted file mode 100644 index 413de30e..00000000 --- a/vendor/helm.sh/helm/v3/pkg/releaseutil/manifest_sorter.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package releaseutil - -import ( - "log" - "path" - "sort" - "strconv" - "strings" - - "github.com/pkg/errors" - "sigs.k8s.io/yaml" - - "helm.sh/helm/v3/pkg/chartutil" - "helm.sh/helm/v3/pkg/release" -) - -// Manifest represents a manifest file, which has a name and some content. 
-type Manifest struct { - Name string - Content string - Head *SimpleHead -} - -// manifestFile represents a file that contains a manifest. -type manifestFile struct { - entries map[string]string - path string - apis chartutil.VersionSet -} - -// result is an intermediate structure used during sorting. -type result struct { - hooks []*release.Hook - generic []Manifest -} - -// TODO: Refactor this out. It's here because naming conventions were not followed through. -// So fix the Test hook names and then remove this. -var events = map[string]release.HookEvent{ - release.HookPreInstall.String(): release.HookPreInstall, - release.HookPostInstall.String(): release.HookPostInstall, - release.HookPreDelete.String(): release.HookPreDelete, - release.HookPostDelete.String(): release.HookPostDelete, - release.HookPreUpgrade.String(): release.HookPreUpgrade, - release.HookPostUpgrade.String(): release.HookPostUpgrade, - release.HookPreRollback.String(): release.HookPreRollback, - release.HookPostRollback.String(): release.HookPostRollback, - release.HookTest.String(): release.HookTest, - // Support test-success for backward compatibility with Helm 2 tests - "test-success": release.HookTest, -} - -// SortManifests takes a map of filename/YAML contents, splits the file -// by manifest entries, and sorts the entries into hook types. -// -// The resulting hooks struct will be populated with all of the generated hooks. -// Any file that does not declare one of the hook types will be placed in the -// 'generic' bucket. -// -// Files that do not parse into the expected format are simply placed into a map and -// returned. 
-func SortManifests(files map[string]string, apis chartutil.VersionSet, ordering KindSortOrder) ([]*release.Hook, []Manifest, error) { - result := &result{} - - var sortedFilePaths []string - for filePath := range files { - sortedFilePaths = append(sortedFilePaths, filePath) - } - sort.Strings(sortedFilePaths) - - for _, filePath := range sortedFilePaths { - content := files[filePath] - - // Skip partials. We could return these as a separate map, but there doesn't - // seem to be any need for that at this time. - if strings.HasPrefix(path.Base(filePath), "_") { - continue - } - // Skip empty files and log this. - if strings.TrimSpace(content) == "" { - continue - } - - manifestFile := &manifestFile{ - entries: SplitManifests(content), - path: filePath, - apis: apis, - } - - if err := manifestFile.sort(result); err != nil { - return result.hooks, result.generic, err - } - } - - return sortHooksByKind(result.hooks, ordering), sortManifestsByKind(result.generic, ordering), nil -} - -// sort takes a manifestFile object which may contain multiple resource definition -// entries and sorts each entry by hook types, and saves the resulting hooks and -// generic manifests (or non-hooks) to the result struct. 
-// -// To determine hook type, it looks for a YAML structure like this: -// -// kind: SomeKind -// apiVersion: v1 -// metadata: -// annotations: -// helm.sh/hook: pre-install -// -// To determine the policy to delete the hook, it looks for a YAML structure like this: -// -// kind: SomeKind -// apiVersion: v1 -// metadata: -// annotations: -// helm.sh/hook-delete-policy: hook-succeeded -func (file *manifestFile) sort(result *result) error { - // Go through manifests in order found in file (function `SplitManifests` creates integer-sortable keys) - var sortedEntryKeys []string - for entryKey := range file.entries { - sortedEntryKeys = append(sortedEntryKeys, entryKey) - } - sort.Sort(BySplitManifestsOrder(sortedEntryKeys)) - - for _, entryKey := range sortedEntryKeys { - m := file.entries[entryKey] - - var entry SimpleHead - if err := yaml.Unmarshal([]byte(m), &entry); err != nil { - return errors.Wrapf(err, "YAML parse error on %s", file.path) - } - - if !hasAnyAnnotation(entry) { - result.generic = append(result.generic, Manifest{ - Name: file.path, - Content: m, - Head: &entry, - }) - continue - } - - hookTypes, ok := entry.Metadata.Annotations[release.HookAnnotation] - if !ok { - result.generic = append(result.generic, Manifest{ - Name: file.path, - Content: m, - Head: &entry, - }) - continue - } - - hw := calculateHookWeight(entry) - - h := &release.Hook{ - Name: entry.Metadata.Name, - Kind: entry.Kind, - Path: file.path, - Manifest: m, - Events: []release.HookEvent{}, - Weight: hw, - DeletePolicies: []release.HookDeletePolicy{}, - } - - isUnknownHook := false - for _, hookType := range strings.Split(hookTypes, ",") { - hookType = strings.ToLower(strings.TrimSpace(hookType)) - e, ok := events[hookType] - if !ok { - isUnknownHook = true - break - } - h.Events = append(h.Events, e) - } - - if isUnknownHook { - log.Printf("info: skipping unknown hook: %q", hookTypes) - continue - } - - result.hooks = append(result.hooks, h) - - operateAnnotationValues(entry, 
release.HookDeleteAnnotation, func(value string) { - h.DeletePolicies = append(h.DeletePolicies, release.HookDeletePolicy(value)) - }) - } - - return nil -} - -// hasAnyAnnotation returns true if the given entry has any annotations at all. -func hasAnyAnnotation(entry SimpleHead) bool { - return entry.Metadata != nil && - entry.Metadata.Annotations != nil && - len(entry.Metadata.Annotations) != 0 -} - -// calculateHookWeight finds the weight in the hook weight annotation. -// -// If no weight is found, the assigned weight is 0 -func calculateHookWeight(entry SimpleHead) int { - hws := entry.Metadata.Annotations[release.HookWeightAnnotation] - hw, err := strconv.Atoi(hws) - if err != nil { - hw = 0 - } - return hw -} - -// operateAnnotationValues finds the given annotation and runs the operate function with the value of that annotation -func operateAnnotationValues(entry SimpleHead, annotation string, operate func(p string)) { - if dps, ok := entry.Metadata.Annotations[annotation]; ok { - for _, dp := range strings.Split(dps, ",") { - dp = strings.ToLower(strings.TrimSpace(dp)) - operate(dp) - } - } -} diff --git a/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go b/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go deleted file mode 100644 index 1a8aa78a..00000000 --- a/vendor/helm.sh/helm/v3/pkg/releaseutil/sorter.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright The Helm Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package releaseutil // import "helm.sh/helm/v3/pkg/releaseutil" - -import ( - "sort" - - rspb "helm.sh/helm/v3/pkg/release" -) - -type list []*rspb.Release - -func (s list) Len() int { return len(s) } -func (s list) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// ByName sorts releases by name -type ByName struct{ list } - -// Less compares to releases -func (s ByName) Less(i, j int) bool { return s.list[i].Name < s.list[j].Name } - -// ByDate sorts releases by date -type ByDate struct{ list } - -// Less compares to releases -func (s ByDate) Less(i, j int) bool { - ti := s.list[i].Info.LastDeployed.Unix() - tj := s.list[j].Info.LastDeployed.Unix() - return ti < tj -} - -// ByRevision sorts releases by revision number -type ByRevision struct{ list } - -// Less compares to releases -func (s ByRevision) Less(i, j int) bool { - return s.list[i].Version < s.list[j].Version -} - -// Reverse reverses the list of releases sorted by the sort func. -func Reverse(list []*rspb.Release, sortFn func([]*rspb.Release)) { - sortFn(list) - for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 { - list[i], list[j] = list[j], list[i] - } -} - -// SortByName returns the list of releases sorted -// in lexicographical order. -func SortByName(list []*rspb.Release) { - sort.Sort(ByName{list}) -} - -// SortByDate returns the list of releases sorted by a -// release's last deployed time (in seconds). -func SortByDate(list []*rspb.Release) { - sort.Sort(ByDate{list}) -} - -// SortByRevision returns the list of releases sorted by a -// release's revision number (release.Version). -func SortByRevision(list []*rspb.Release) { - sort.Sort(ByRevision{list}) -} diff --git a/vendor/helm.sh/helm/v3/pkg/time/time.go b/vendor/helm.sh/helm/v3/pkg/time/time.go deleted file mode 100644 index 44f3fedf..00000000 --- a/vendor/helm.sh/helm/v3/pkg/time/time.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright The Helm Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package time contains a wrapper for time.Time in the standard library and -// associated methods. This package mainly exists to workaround an issue in Go -// where the serializer doesn't omit an empty value for time: -// https://github.com/golang/go/issues/11939. As such, this can be removed if a -// proposal is ever accepted for Go -package time - -import ( - "bytes" - "time" -) - -// emptyString contains an empty JSON string value to be used as output -var emptyString = `""` - -// Time is a convenience wrapper around stdlib time, but with different -// marshalling and unmarshaling for zero values -type Time struct { - time.Time -} - -// Now returns the current time. 
It is a convenience wrapper around time.Now() -func Now() Time { - return Time{time.Now()} -} - -func (t Time) MarshalJSON() ([]byte, error) { - if t.Time.IsZero() { - return []byte(emptyString), nil - } - - return t.Time.MarshalJSON() -} - -func (t *Time) UnmarshalJSON(b []byte) error { - if bytes.Equal(b, []byte("null")) { - return nil - } - // If it is empty, we don't have to set anything since time.Time is not a - // pointer and will be set to the zero value - if bytes.Equal([]byte(emptyString), b) { - return nil - } - - return t.Time.UnmarshalJSON(b) -} - -func Parse(layout, value string) (Time, error) { - t, err := time.Parse(layout, value) - return Time{Time: t}, err -} -func ParseInLocation(layout, value string, loc *time.Location) (Time, error) { - t, err := time.ParseInLocation(layout, value, loc) - return Time{Time: t}, err -} - -func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { - return Time{Time: time.Date(year, month, day, hour, min, sec, nsec, loc)} -} - -func Unix(sec int64, nsec int64) Time { return Time{Time: time.Unix(sec, nsec)} } - -func (t Time) Add(d time.Duration) Time { return Time{Time: t.Time.Add(d)} } -func (t Time) AddDate(years int, months int, days int) Time { - return Time{Time: t.Time.AddDate(years, months, days)} -} -func (t Time) After(u Time) bool { return t.Time.After(u.Time) } -func (t Time) Before(u Time) bool { return t.Time.Before(u.Time) } -func (t Time) Equal(u Time) bool { return t.Time.Equal(u.Time) } -func (t Time) In(loc *time.Location) Time { return Time{Time: t.Time.In(loc)} } -func (t Time) Local() Time { return Time{Time: t.Time.Local()} } -func (t Time) Round(d time.Duration) Time { return Time{Time: t.Time.Round(d)} } -func (t Time) Sub(u Time) time.Duration { return t.Time.Sub(u.Time) } -func (t Time) Truncate(d time.Duration) Time { return Time{Time: t.Time.Truncate(d)} } -func (t Time) UTC() Time { return Time{Time: t.Time.UTC()} } diff --git a/vendor/modules.txt 
b/vendor/modules.txt index 70be46b1..e751bbce 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -302,7 +302,7 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/oauth2 v0.15.0 +# golang.org/x/oauth2 v0.16.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -419,7 +419,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.14.2 +# helm.sh/helm/v3 v3.14.4 ## explicit; go 1.21 helm.sh/helm/v3/internal/sympath helm.sh/helm/v3/internal/version @@ -428,9 +428,6 @@ helm.sh/helm/v3/pkg/chart/loader helm.sh/helm/v3/pkg/chartutil helm.sh/helm/v3/pkg/engine helm.sh/helm/v3/pkg/ignore -helm.sh/helm/v3/pkg/release -helm.sh/helm/v3/pkg/releaseutil -helm.sh/helm/v3/pkg/time # k8s.io/api v0.29.2 ## explicit; go 1.21 k8s.io/api/admission/v1 @@ -913,7 +910,7 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20240102154912-e7106e64919e +# k8s.io/utils v0.0.0-20240310230437-4693a0247e57 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -934,21 +931,24 @@ kmodules.xyz/client-go/client/apiutil kmodules.xyz/client-go/core/v1 kmodules.xyz/client-go/meta kmodules.xyz/client-go/tools/clientcmd -# open-cluster-management.io/addon-framework v0.9.1-0.20240402013859-be542a6d0a9c +# open-cluster-management.io/addon-framework v0.9.2 ## explicit; go 1.21 open-cluster-management.io/addon-framework/pkg/addonfactory open-cluster-management.io/addon-framework/pkg/addonmanager open-cluster-management.io/addon-framework/pkg/addonmanager/constants open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy 
open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate -open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig -open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddon +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration open-cluster-management.io/addon-framework/pkg/agent open-cluster-management.io/addon-framework/pkg/assets open-cluster-management.io/addon-framework/pkg/basecontroller/factory open-cluster-management.io/addon-framework/pkg/index +open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration +open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner open-cluster-management.io/addon-framework/pkg/utils # open-cluster-management.io/api v0.13.0 ## explicit; go 1.21 @@ -1000,13 +1000,15 @@ open-cluster-management.io/api/work/v1alpha1 # open-cluster-management.io/managed-serviceaccount v0.5.0 ## explicit; go 1.20 open-cluster-management.io/managed-serviceaccount/apis/authentication/v1alpha1 -# open-cluster-management.io/sdk-go v0.13.0 +# open-cluster-management.io/sdk-go v0.13.1-0.20240416030555-aa744f426379 ## explicit; go 1.21 +open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1 +open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2 open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder open-cluster-management.io/sdk-go/pkg/patcher -# sigs.k8s.io/controller-runtime v0.17.2 => github.com/kmodules/controller-runtime v0.16.1-0.20240128092212-43c4e15c56b1 +# sigs.k8s.io/controller-runtime v0.17.3 => github.com/kmodules/controller-runtime v0.17.4-0.20240410011645-f1b2f533ea66 ## 
explicit; go 1.21 sigs.k8s.io/controller-runtime sigs.k8s.io/controller-runtime/pkg/builder @@ -1150,7 +1152,7 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 # github.com/Masterminds/sprig/v3 => github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8 -# sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.16.1-0.20240128092212-43c4e15c56b1 +# sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.17.4-0.20240410011645-f1b2f533ea66 # github.com/imdario/mergo => github.com/imdario/mergo v0.3.6 # k8s.io/apiserver => github.com/kmodules/apiserver v0.29.1-0.20240104121741-1fb217d4a573 # k8s.io/kubernetes => github.com/kmodules/kubernetes v1.30.0-alpha.0.0.20231224075822-3bd9a13c86db diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go index d92690f7..e806be6a 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go @@ -10,9 +10,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" clusterv1 "open-cluster-management.io/api/cluster/v1" "open-cluster-management.io/addon-framework/pkg/agent" @@ -36,12 +34,9 @@ type AgentAddonFactory struct { getValuesFuncs []GetValuesFunc agentAddonOptions agent.AgentAddonOptions // trimCRDDescription flag is used to trim the description of CRDs in manifestWork. disabled by default. - trimCRDDescription bool - // Deprecated: use clusterClient to get the hosting cluster. 
+ trimCRDDescription bool hostingCluster *clusterv1.ManagedCluster - clusterClient clusterclientset.Interface agentInstallNamespace func(addon *addonapiv1alpha1.ManagedClusterAddOn) (string, error) - helmEngineStrict bool } // NewAgentAddonFactory builds an addonAgentFactory instance with addon name and fs. @@ -58,14 +53,12 @@ func NewAgentAddonFactory(addonName string, fs embed.FS, dir string) *AgentAddon agentAddonOptions: agent.AgentAddonOptions{ AddonName: addonName, Registration: nil, + InstallStrategy: nil, HealthProber: nil, SupportedConfigGVRs: []schema.GroupVersionResource{}, - // Set a default hosted mode info func. - HostedModeInfoFunc: constants.GetHostedModeInfo, }, trimCRDDescription: false, scheme: s, - helmEngineStrict: false, } } @@ -85,6 +78,19 @@ func (f *AgentAddonFactory) WithGetValuesFuncs(getValuesFuncs ...GetValuesFunc) return f } +// WithInstallStrategy defines the installation strategy of the manifests prescribed by Manifests(..). +// Deprecated: add annotation "addon.open-cluster-management.io/lifecycle: addon-manager" to ClusterManagementAddon +// and define install strategy in ClusterManagementAddon spec.installStrategy instead. +// The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. +func (f *AgentAddonFactory) WithInstallStrategy(strategy *agent.InstallStrategy) *AgentAddonFactory { + if strategy.InstallNamespace == "" { + strategy.InstallNamespace = AddonDefaultInstallNamespace + } + f.agentAddonOptions.InstallStrategy = strategy + + return f +} + // WithAgentRegistrationOption defines how agent is registered to the hub cluster. func (f *AgentAddonFactory) WithAgentRegistrationOption(option *agent.RegistrationOption) *AgentAddonFactory { f.agentAddonOptions.Registration = option @@ -103,25 +109,12 @@ func (f *AgentAddonFactory) WithAgentHostedModeEnabledOption() *AgentAddonFactor return f } -// WithAgentHostedInfoFn sets the function to get the hosting cluster of an addon in the hosted mode. 
-func (f *AgentAddonFactory) WithAgentHostedInfoFn( - infoFn func(*addonapiv1alpha1.ManagedClusterAddOn, *clusterv1.ManagedCluster) (string, string)) *AgentAddonFactory { - f.agentAddonOptions.HostedModeInfoFunc = infoFn - return f -} - // WithTrimCRDDescription is to enable trim the description of CRDs in manifestWork. func (f *AgentAddonFactory) WithTrimCRDDescription() *AgentAddonFactory { f.trimCRDDescription = true return f } -// WithHelmEngineStrict is to enable script go template rendering for Helm charts to generate manifestWork. -func (f *AgentAddonFactory) WithHelmEngineStrict() *AgentAddonFactory { - f.helmEngineStrict = true - return f -} - // WithConfigGVRs defines the addon supported configuration GroupVersionResource func (f *AgentAddonFactory) WithConfigGVRs(gvrs ...schema.GroupVersionResource) *AgentAddonFactory { f.agentAddonOptions.SupportedConfigGVRs = append(f.agentAddonOptions.SupportedConfigGVRs, gvrs...) @@ -130,18 +123,11 @@ func (f *AgentAddonFactory) WithConfigGVRs(gvrs ...schema.GroupVersionResource) // WithHostingCluster defines the hosting cluster used in hosted mode. An AgentAddon may use this to provide // additional metadata. -// Deprecated: use WithManagedClusterClient to set a cluster client that can get the hosting cluster. func (f *AgentAddonFactory) WithHostingCluster(cluster *clusterv1.ManagedCluster) *AgentAddonFactory { f.hostingCluster = cluster return f } -// WithManagedClusterClient defines the cluster client that can get the hosting cluster used in hosted mode. -func (f *AgentAddonFactory) WithManagedClusterClient(c clusterclientset.Interface) *AgentAddonFactory { - f.clusterClient = c - return f -} - // WithAgentDeployTriggerClusterFilter defines the filter func to trigger the agent deploy/redploy when cluster info is // changed. 
Addons that need information from the ManagedCluster resource when deploying the agent should use this // function to set what information they need, otherwise the expected/up-to-date agent may be deployed delayed since the diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go index e010d207..fbb9b8c5 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go @@ -2,7 +2,6 @@ package addonfactory import ( "bufio" - "context" "fmt" "io" "sort" @@ -11,18 +10,14 @@ import ( "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/chartutil" "helm.sh/helm/v3/pkg/engine" - "helm.sh/helm/v3/pkg/releaseutil" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/klog/v2" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" clusterv1 "open-cluster-management.io/api/cluster/v1" + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" ) @@ -47,16 +42,13 @@ type helmDefaultValues struct { } type HelmAgentAddon struct { - decoder runtime.Decoder - chart *chart.Chart - getValuesFuncs []GetValuesFunc - agentAddonOptions agent.AgentAddonOptions - trimCRDDescription bool - // Deprecated: use clusterClient to get the hosting cluster. 
+ decoder runtime.Decoder + chart *chart.Chart + getValuesFuncs []GetValuesFunc + agentAddonOptions agent.AgentAddonOptions + trimCRDDescription bool hostingCluster *clusterv1.ManagedCluster - clusterClient clusterclientset.Interface agentInstallNamespace func(addon *addonapiv1alpha1.ManagedClusterAddOn) (string, error) - helmEngineStrict bool } func newHelmAgentAddon(factory *AgentAddonFactory, chart *chart.Chart) *HelmAgentAddon { @@ -67,40 +59,11 @@ func newHelmAgentAddon(factory *AgentAddonFactory, chart *chart.Chart) *HelmAgen agentAddonOptions: factory.agentAddonOptions, trimCRDDescription: factory.trimCRDDescription, hostingCluster: factory.hostingCluster, - clusterClient: factory.clusterClient, agentInstallNamespace: factory.agentInstallNamespace, - helmEngineStrict: factory.helmEngineStrict, } } func (a *HelmAgentAddon) Manifests( - cluster *clusterv1.ManagedCluster, - addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { - objects, err := a.renderManifests(cluster, addon) - if err != nil { - return nil, err - } - - manifests := make([]manifest, 0, len(objects)) - for _, obj := range objects { - a, err := meta.TypeAccessor(obj) - if err != nil { - return nil, err - } - manifests = append(manifests, manifest{ - Object: obj, - Kind: a.GetKind(), - }) - } - sortManifestsByKind(manifests, releaseutil.InstallOrder) - - for i, manifest := range manifests { - objects[i] = manifest.Object - } - return objects, nil -} - -func (a *HelmAgentAddon) renderManifests( cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) ([]runtime.Object, error) { var objects []runtime.Object @@ -111,7 +74,7 @@ func (a *HelmAgentAddon) renderManifests( } helmEngine := engine.Engine{ - Strict: a.helmEngineStrict, + Strict: true, LintMode: false, } @@ -130,7 +93,16 @@ func (a *HelmAgentAddon) renderManifests( return objects, err } - for k, data := range templates { + // sort the filenames of the templates so the manifests are ordered 
consistently + keys := make([]string, 0, len(templates)) + for k := range templates { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + data := templates[k] + if len(data) == 0 { continue } @@ -159,6 +131,7 @@ func (a *HelmAgentAddon) renderManifests( objects = append(objects, object) } } + } if a.trimCRDDescription { @@ -251,7 +224,7 @@ func (a *HelmAgentAddon) getBuiltinValues( } builtinValues.AddonInstallNamespace = addonInstallNamespace - builtinValues.InstallMode, _ = a.agentAddonOptions.HostedModeInfoFunc(addon, cluster) + builtinValues.InstallMode, _ = constants.GetHostedModeInfo(addon.GetAnnotations()) helmBuiltinValues, err := JsonStructToValues(builtinValues) if err != nil { @@ -261,12 +234,6 @@ func (a *HelmAgentAddon) getBuiltinValues( return helmBuiltinValues, nil } -// Deprecated: use "WithManagedClusterClient" in AgentAddonFactory to set a cluster client that -// can be used to get the hosting cluster. -func (a *HelmAgentAddon) SetHostingCluster(hostingCluster *clusterv1.ManagedCluster) { - a.hostingCluster = hostingCluster -} - func (a *HelmAgentAddon) getDefaultValues( cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) (Values, error) { @@ -281,21 +248,6 @@ func (a *HelmAgentAddon) getDefaultValues( if a.hostingCluster != nil { defaultValues.HostingClusterCapabilities = *a.capabilities(a.hostingCluster, addon) - } else if a.clusterClient != nil { - _, hostingClusterName := a.agentAddonOptions.HostedModeInfoFunc(addon, cluster) - if len(hostingClusterName) > 0 { - hostingCluster, err := a.clusterClient.ClusterV1().ManagedClusters(). 
- Get(context.TODO(), hostingClusterName, metav1.GetOptions{}) - if err == nil { - defaultValues.HostingClusterCapabilities = *a.capabilities(hostingCluster, addon) - } else if errors.IsNotFound(err) { - klog.Infof("hostingCluster %s not found, skip providing default value hostingClusterCapabilities", - hostingClusterName) - } else { - klog.Errorf("failed to get hostingCluster %s. err:%v", hostingClusterName, err) - return nil, err - } - } } helmDefaultValues, err := JsonStructToValues(defaultValues) @@ -328,47 +280,3 @@ func (a *HelmAgentAddon) releaseOptions( releaseOptions.Namespace = namespace return releaseOptions, nil } - -// manifest represents a manifest file, which has a name and some content. -type manifest struct { - Object runtime.Object - Kind string -} - -// sort manifests by kind. -// -// Results are sorted by 'ordering', keeping order of items with equal kind/priority -func sortManifestsByKind(manifests []manifest, ordering releaseutil.KindSortOrder) []manifest { - sort.SliceStable(manifests, func(i, j int) bool { - return lessByKind(manifests[i], manifests[j], manifests[i].Kind, manifests[j].Kind, ordering) - }) - - return manifests -} - -func lessByKind(a interface{}, b interface{}, kindA string, kindB string, o releaseutil.KindSortOrder) bool { - ordering := make(map[string]int, len(o)) - for v, k := range o { - ordering[k] = v - } - - first, aok := ordering[kindA] - second, bok := ordering[kindB] - - if !aok && !bok { - // if both are unknown then sort alphabetically by kind, keep original order if same kind - if kindA != kindB { - return kindA < kindB - } - return first < second - } - // unknown kind is last - if !aok { - return false - } - if !bok { - return true - } - // sort different kinds, keep original order if same priority - return first < second -} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go 
b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go index 5772fe95..ed2977ac 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go @@ -9,6 +9,7 @@ import ( addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/assets" ) @@ -142,7 +143,7 @@ func (a *TemplateAgentAddon) getBuiltinValues( } builtinValues.AddonInstallNamespace = installNamespace - builtinValues.InstallMode, _ = a.agentAddonOptions.HostedModeInfoFunc(addon, cluster) + builtinValues.InstallMode, _ = constants.GetHostedModeInfo(addon.GetAnnotations()) return StructToValues(builtinValues), nil } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go index ffc2829a..56e6f016 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go @@ -4,7 +4,6 @@ import ( "fmt" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - clusterv1 "open-cluster-management.io/api/cluster/v1" ) const ( @@ -36,11 +35,8 @@ func PreDeleteHookHostingWorkName(addonNamespace, addonName string) string { } // GetHostedModeInfo returns addon installation mode and hosting cluster name. 
-func GetHostedModeInfo(addon *addonv1alpha1.ManagedClusterAddOn, _ *clusterv1.ManagedCluster) (string, string) { - if len(addon.Annotations) == 0 { - return InstallModeDefault, "" - } - hostingClusterName, ok := addon.Annotations[addonv1alpha1.HostingClusterNameAnnotationKey] +func GetHostedModeInfo(annotations map[string]string) (string, string) { + hostingClusterName, ok := annotations[addonv1alpha1.HostingClusterNameAnnotationKey] if !ok { return InstallModeDefault, "" } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go new file mode 100644 index 00000000..4c57759e --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go @@ -0,0 +1,138 @@ +package addoninstall + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + errorsutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" + clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +// managedClusterController reconciles instances of ManagedCluster on the hub. 
+type addonInstallController struct { + addonClient addonv1alpha1client.Interface + managedClusterLister clusterlister.ManagedClusterLister + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + agentAddons map[string]agent.AgentAddon +} + +func NewAddonInstallController( + addonClient addonv1alpha1client.Interface, + clusterInformers clusterinformers.ManagedClusterInformer, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + agentAddons map[string]agent.AgentAddon, +) factory.Controller { + c := &addonInstallController{ + addonClient: addonClient, + managedClusterLister: clusterInformers.Lister(), + managedClusterAddonLister: addonInformers.Lister(), + agentAddons: agentAddons, + } + + return factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + return []string{accessor.GetNamespace()} + }, + func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if _, ok := c.agentAddons[accessor.GetName()]; !ok { + return false + } + + return true + }, + addonInformers.Informer()). + WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + return []string{accessor.GetName()} + }, + clusterInformers.Informer(), + ). 
+ WithSync(c.sync).ToController("addon-install-controller") +} + +func (c *addonInstallController) sync(ctx context.Context, syncCtx factory.SyncContext, clusterName string) error { + klog.V(4).Infof("Reconciling addon deploy on cluster %q", clusterName) + + cluster, err := c.managedClusterLister.Get(clusterName) + if errors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + // if cluster is deleting, do not install addon + if !cluster.DeletionTimestamp.IsZero() { + klog.V(4).Infof("Cluster %q is deleting, skip addon deploy", clusterName) + return nil + } + + if value, ok := cluster.Annotations[addonapiv1alpha1.DisableAddonAutomaticInstallationAnnotationKey]; ok && + strings.EqualFold(value, "true") { + + klog.V(4).Infof("Cluster %q has annotation %q, skip addon deploy", + clusterName, addonapiv1alpha1.DisableAddonAutomaticInstallationAnnotationKey) + return nil + } + + var errs []error + + for addonName, addon := range c.agentAddons { + if addon.GetAgentAddonOptions().InstallStrategy == nil { + continue + } + + managedClusterFilter := addon.GetAgentAddonOptions().InstallStrategy.GetManagedClusterFilter() + if managedClusterFilter == nil { + continue + } + if !managedClusterFilter(cluster) { + klog.V(4).Infof("managed cluster filter is not match for addon %s on %s", addonName, clusterName) + continue + } + + err = c.applyAddon(ctx, addonName, clusterName, addon.GetAgentAddonOptions().InstallStrategy.InstallNamespace) + if err != nil { + errs = append(errs, err) + } + } + + return errorsutil.NewAggregate(errs) +} + +func (c *addonInstallController) applyAddon(ctx context.Context, addonName, clusterName, installNamespace string) error { + _, err := c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName) + + // only create addon when it is missing, if user update the addon resource ,it should not be reverted + if errors.IsNotFound(err) { + addon := &addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: addonName, + Namespace: clusterName, + }, + Spec: addonapiv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: installNamespace, + }, + } + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName).Create(ctx, addon, metav1.CreateOptions{}) + return err + } + + return err +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go index e5e7bed3..930a0afa 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go @@ -252,37 +252,25 @@ func (c *addonDeployController) sync(ctx context.Context, syncCtx factory.SyncCo syncers := []addonDeploySyncer{ &defaultSyncer{ - buildWorks: c.buildDeployManifestWorksFunc( - newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder), - addonapiv1alpha1.ManagedClusterAddOnManifestApplied, - ), + buildWorks: c.buildDeployManifestWorks, applyWork: c.applyWork, getWorkByAddon: c.getWorksByAddonFn(index.ManifestWorkByAddon), deleteWork: c.workApplier.Delete, agentAddon: agentAddon, }, &hostedSyncer{ - buildWorks: c.buildDeployManifestWorksFunc( - newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder), - addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, - ), + buildWorks: c.buildDeployManifestWorks, applyWork: c.applyWork, deleteWork: c.workApplier.Delete, getCluster: c.managedClusterLister.Get, getWorkByAddon: c.getWorksByAddonFn(index.ManifestWorkByHostedAddon), agentAddon: agentAddon}, &defaultHookSyncer{ - buildWorks: c.buildHookManifestWorkFunc( - newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder), - addonapiv1alpha1.ManagedClusterAddOnManifestApplied, - ), + 
buildWorks: c.buildHookManifestWork, applyWork: c.applyWork, agentAddon: agentAddon}, &hostedHookSyncer{ - buildWorks: c.buildHookManifestWorkFunc( - newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder), - addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, - ), + buildWorks: c.buildHookManifestWork, applyWork: c.applyWork, deleteWork: c.workApplier.Delete, getCluster: c.managedClusterLister.Get, @@ -366,109 +354,103 @@ func (c *addonDeployController) applyWork(ctx context.Context, appliedType strin return work, nil } -type buildDeployWorkFunc func( - workNamespace string, +func (c *addonDeployController) buildDeployManifestWorks(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, - addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) - -func (c *addonDeployController) buildDeployManifestWorksFunc(addonWorkBuilder *addonWorksBuilder, appliedType string) buildDeployWorkFunc { - return func( - workNamespace string, - cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, - addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) { - agentAddon := c.agentAddons[addon.Name] - if agentAddon == nil { - return nil, nil, fmt.Errorf("failed to get agentAddon") - } + addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) { + var appliedType string + var addonWorkBuilder *addonWorksBuilder - objects, err := agentAddon.Manifests(cluster, addon) - if err != nil { - meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: appliedType, - Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, - Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), - }) - return nil, nil, err - } - if len(objects) == 0 { 
- return nil, nil, nil - } + agentAddon := c.agentAddons[addon.Name] + if agentAddon == nil { + return nil, nil, fmt.Errorf("failed to get agentAddon") + } - // this is to retrieve the intended mode of the addon. - var mode string - if agentAddon.GetAgentAddonOptions().HostedModeInfoFunc == nil { - mode = constants.InstallModeDefault - } else { - mode, _ = agentAddon.GetAgentAddonOptions().HostedModeInfoFunc(addon, cluster) - } + switch installMode { + case constants.InstallModeHosted: + appliedType = addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied + addonWorkBuilder = newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + case constants.InstallModeDefault: + appliedType = addonapiv1alpha1.ManagedClusterAddOnManifestApplied + addonWorkBuilder = newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + default: + return nil, nil, fmt.Errorf("invalid install mode %v", installMode) + } - manifestOptions := getManifestConfigOption(agentAddon, cluster, addon) - existingWorksCopy := []workapiv1.ManifestWork{} - for _, work := range existingWorks { - existingWorksCopy = append(existingWorksCopy, *work) - } - appliedWorks, deleteWorks, err = addonWorkBuilder.BuildDeployWorks( - mode, workNamespace, addon, existingWorksCopy, objects, manifestOptions) - if err != nil { - meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: appliedType, - Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, - Message: fmt.Sprintf("failed to build manifestwork: %v", err), - }) - return nil, nil, err - } - return appliedWorks, deleteWorks, nil + objects, err := agentAddon.Manifests(cluster, addon) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to 
get manifest from agent interface: %v", err), + }) + return nil, nil, err + } + if len(objects) == 0 { + return nil, nil, nil } -} -type buildDeployHookFunc func( - workNamespace string, - cluster *clusterv1.ManagedCluster, - addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + manifestOptions := getManifestConfigOption(agentAddon, cluster, addon) + existingWorksCopy := []workapiv1.ManifestWork{} + for _, work := range existingWorks { + existingWorksCopy = append(existingWorksCopy, *work) + } + appliedWorks, deleteWorks, err = addonWorkBuilder.BuildDeployWorks(workNamespace, addon, existingWorksCopy, objects, manifestOptions) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to build manifestwork: %v", err), + }) + return nil, nil, err + } + return appliedWorks, deleteWorks, nil +} +func (c *addonDeployController) buildHookManifestWork(installMode, workNamespace string, + cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) { + var appliedType string + var addonWorkBuilder *addonWorksBuilder + + agentAddon := c.agentAddons[addon.Name] + if agentAddon == nil { + return nil, fmt.Errorf("failed to get agentAddon") + } -func (c *addonDeployController) buildHookManifestWorkFunc(addonWorkBuilder *addonWorksBuilder, appliedType string) buildDeployHookFunc { - return func( - workNamespace string, - cluster *clusterv1.ManagedCluster, - addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) { - agentAddon := c.agentAddons[addon.Name] - if agentAddon == nil { - return nil, fmt.Errorf("failed to get agentAddon") - } + switch installMode { + case constants.InstallModeHosted: + appliedType = addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied + addonWorkBuilder = 
newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + case constants.InstallModeDefault: + appliedType = addonapiv1alpha1.ManagedClusterAddOnManifestApplied + addonWorkBuilder = newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) + default: + return nil, fmt.Errorf("invalid install mode %v", installMode) + } - objects, err := agentAddon.Manifests(cluster, addon) - if err != nil { - meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: appliedType, - Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, - Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), - }) - return nil, err - } - if len(objects) == 0 { - return nil, nil - } + objects, err := agentAddon.Manifests(cluster, addon) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), + }) + return nil, err + } + if len(objects) == 0 { + return nil, nil + } - // this is to retrieve the intended mode of the addon. 
- var mode string - if agentAddon.GetAgentAddonOptions().HostedModeInfoFunc == nil { - mode = constants.InstallModeDefault - } else { - mode, _ = agentAddon.GetAgentAddonOptions().HostedModeInfoFunc(addon, cluster) - } - hookWork, err := addonWorkBuilder.BuildHookWork(mode, workNamespace, addon, objects) - if err != nil { - meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: appliedType, - Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, - Message: fmt.Sprintf("failed to build manifestwork: %v", err), - }) - return nil, err - } - return hookWork, nil + hookWork, err := addonWorkBuilder.BuildHookWork(workNamespace, addon, objects) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: appliedType, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, + Message: fmt.Sprintf("failed to build manifestwork: %v", err), + }) + return nil, err } + return hookWork, nil } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go index 03e45f9b..bcca1220 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go @@ -10,13 +10,15 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) type defaultHookSyncer struct { - buildWorks buildDeployHookFunc - applyWork func(ctx context.Context, appliedType 
string, + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) + applyWork func(ctx context.Context, appliedType string, work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) agentAddon agent.AgentAddon } @@ -27,7 +29,7 @@ func (s *defaultHookSyncer) sync(ctx context.Context, addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { deployWorkNamespace := addon.Namespace - hookWork, err := s.buildWorks(deployWorkNamespace, cluster, addon) + hookWork, err := s.buildWorks(constants.InstallModeDefault, deployWorkNamespace, cluster, addon) if err != nil { return addon, err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go index 62185172..c53c76ab 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go @@ -8,12 +8,14 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) type defaultSyncer struct { - buildWorks buildDeployWorkFunc + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, + addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) applyWork func(ctx context.Context, appliedType string, work *workapiv1.ManifestWork, addon 
*addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) @@ -52,7 +54,7 @@ func (s *defaultSyncer) sync(ctx context.Context, return addon, err } - deployWorks, deleteWorks, err := s.buildWorks(deployWorkNamespace, cluster, currentWorks, addon) + deployWorks, deleteWorks, err := s.buildWorks(constants.InstallModeDefault, deployWorkNamespace, cluster, currentWorks, addon) if err != nil { return addon, err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go index 3ee5eb47..98b14f80 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go @@ -18,7 +18,8 @@ import ( ) type hostedHookSyncer struct { - buildWorks buildDeployHookFunc + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) applyWork func(ctx context.Context, appliedType string, work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) @@ -42,10 +43,7 @@ func (s *hostedHookSyncer) sync(ctx context.Context, return addon, nil } - if s.agentAddon.GetAgentAddonOptions().HostedModeInfoFunc == nil { - return addon, nil - } - installMode, hostingClusterName := s.agentAddon.GetAgentAddonOptions().HostedModeInfoFunc(addon, cluster) + installMode, hostingClusterName := constants.GetHostedModeInfo(addon.GetAnnotations()) if installMode != constants.InstallModeHosted { return addon, nil } @@ -72,7 +70,7 @@ func (s *hostedHookSyncer) sync(ctx context.Context, addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) return addon, nil } - hookWork, err := 
s.buildWorks(hostingClusterName, cluster, addon) + hookWork, err := s.buildWorks(constants.InstallModeHosted, hostingClusterName, cluster, addon) if err != nil { return addon, err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go index 19a9bca4..55a0546f 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go @@ -18,7 +18,8 @@ import ( ) type hostedSyncer struct { - buildWorks buildDeployWorkFunc + buildWorks func(installMode, workNamespace string, cluster *clusterv1.ManagedCluster, existingWorks []*workapiv1.ManifestWork, + addon *addonapiv1alpha1.ManagedClusterAddOn) (appliedWorks, deleteWorks []*workapiv1.ManifestWork, err error) applyWork func(ctx context.Context, appliedType string, work *workapiv1.ManifestWork, addon *addonapiv1alpha1.ManagedClusterAddOn) (*workapiv1.ManifestWork, error) @@ -41,10 +42,7 @@ func (s *hostedSyncer) sync(ctx context.Context, return addon, nil } - if s.agentAddon.GetAgentAddonOptions().HostedModeInfoFunc == nil { - return addon, nil - } - installMode, hostingClusterName := s.agentAddon.GetAgentAddonOptions().HostedModeInfoFunc(addon, cluster) + installMode, hostingClusterName := constants.GetHostedModeInfo(addon.GetAnnotations()) if installMode != constants.InstallModeHosted { // the installMode is changed from hosted to default, cleanup the hosting resources if err := s.cleanupDeployWork(ctx, addon); err != nil { @@ -117,7 +115,7 @@ func (s *hostedSyncer) sync(ctx context.Context, return addon, err } - deployWorks, deleteWorks, err := s.buildWorks(hostingClusterName, cluster, currentWorks, addon) + deployWorks, deleteWorks, err := s.buildWorks(constants.InstallModeHosted, 
hostingClusterName, cluster, currentWorks, addon) if err != nil { return addon, err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go index 7df50357..a258369e 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go @@ -260,16 +260,19 @@ func (m *managedManifest) preDeleteHookManifestWorkName(addonNamespace, addonNam // BuildDeployWorks returns the deploy manifestWorks. if there is no manifest need // to deploy, will return nil. -func (b *addonWorksBuilder) BuildDeployWorks(installMode, addonWorkNamespace string, +func (b *addonWorksBuilder) BuildDeployWorks(addonWorkNamespace string, addon *addonapiv1alpha1.ManagedClusterAddOn, existingWorks []workapiv1.ManifestWork, objects []runtime.Object, manifestOptions []workapiv1.ManifestConfigOption) (deployWorks, deleteWorks []*workapiv1.ManifestWork, err error) { var deployObjects []runtime.Object + var owner *metav1.OwnerReference + installMode, _ := constants.GetHostedModeInfo(addon.GetAnnotations()) + // This owner is only added to the manifestWork deployed in managed cluster ns. // the manifestWork in managed cluster ns is cleaned up via the addon ownerRef, so need to add the owner. // the manifestWork in hosting cluster ns is cleaned up by its controller since it and its addon cross ns. 
- owner := metav1.NewControllerRef(addon, addonapiv1alpha1.GroupVersion.WithKind("ManagedClusterAddOn")) + owner = metav1.NewControllerRef(addon, addonapiv1alpha1.GroupVersion.WithKind("ManagedClusterAddOn")) var deletionOrphaningRules []workapiv1.OrphaningRule for _, object := range objects { @@ -325,13 +328,18 @@ func (b *addonWorksBuilder) BuildDeployWorks(installMode, addonWorkNamespace str // BuildHookWork returns the preDelete manifestWork, if there is no manifest need // to deploy, will return nil. -func (b *addonWorksBuilder) BuildHookWork(installMode, addonWorkNamespace string, +func (b *addonWorksBuilder) BuildHookWork(addonWorkNamespace string, addon *addonapiv1alpha1.ManagedClusterAddOn, objects []runtime.Object) (hookWork *workapiv1.ManifestWork, err error) { var hookManifests []workapiv1.Manifest var hookManifestConfigs []workapiv1.ManifestConfigOption + var owner *metav1.OwnerReference + installMode, _ := constants.GetHostedModeInfo(addon.GetAnnotations()) - owner := metav1.NewControllerRef(addon, addonapiv1alpha1.GroupVersion.WithKind("ManagedClusterAddOn")) + // only set addon as the owner of works in default mode. should not set owner in hosted mode. + if installMode == constants.InstallModeDefault { + owner = metav1.NewControllerRef(addon, addonapiv1alpha1.GroupVersion.WithKind("ManagedClusterAddOn")) + } for _, object := range objects { deployable, err := b.processor.deployable(b.hostedModeEnabled, installMode, object) @@ -359,9 +367,7 @@ func (b *addonWorksBuilder) BuildHookWork(installMode, addonWorkNamespace string } hookWork = newManifestWork(addon.Namespace, addon.Name, addonWorkNamespace, hookManifests, b.processor.preDeleteHookManifestWorkName) - - // This owner is only added to the manifestWork deployed in managed cluster ns. 
- if addon.Namespace == addonWorkNamespace { + if owner != nil { hookWork.OwnerReferences = []metav1.OwnerReference{*owner} } hookWork.Spec.ManifestConfigs = hookManifestConfigs diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go index 07543ea8..6c0a3d76 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go @@ -71,7 +71,7 @@ func NewCSRSignController( }, csrInformer.Informer()). WithSync(c.sync). - ToController("CSRSignController") + ToController("CSRApprovingController") } func (c *csrSignController) sync(ctx context.Context, syncCtx factory.SyncContext, csrName string) error { diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddon/controller.go similarity index 75% rename from vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby/controller.go rename to vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddon/controller.go index a8c67909..47ef7b86 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddon/controller.go @@ -1,4 +1,4 @@ -package cmamanagedby +package managementaddon import ( "context" @@ -17,15 +17,11 @@ import ( ) const ( - controllerName = "cma-managed-by-controller" + controllerName = "management-addon-controller" ) -// cmaManagedByController reconciles clustermanagementaddon on the hub -// to update the annotation 
"addon.open-cluster-management.io/lifecycle" value. -// It removes the value "self" if exist, which indicate the -// the installation and upgrade of addon will no longer be managed by addon itself. -// Once removed, the value will be set to "addon-manager" by the general addon manager. -type cmaManagedByController struct { +// clusterManagementAddonController reconciles cma on the hub. +type clusterManagementAddonController struct { addonClient addonv1alpha1client.Interface clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister agentAddons map[string]agent.AgentAddon @@ -35,7 +31,7 @@ type cmaManagedByController struct { addonapiv1alpha1.ClusterManagementAddOnStatus] } -func NewCMAManagedByController( +func NewManagementAddonController( addonClient addonv1alpha1client.Interface, clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, agentAddons map[string]agent.AgentAddon, @@ -43,7 +39,7 @@ func NewCMAManagedByController( ) factory.Controller { syncCtx := factory.NewSyncContext(controllerName) - c := &cmaManagedByController{ + c := &clusterManagementAddonController{ addonClient: addonClient, clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), agentAddons: agentAddons, @@ -64,7 +60,7 @@ func NewCMAManagedByController( WithSync(c.sync).ToController(controllerName) } -func (c *cmaManagedByController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { +func (c *clusterManagementAddonController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { _, addonName, err := cache.SplitMetaNamespaceKey(key) if err != nil { // ignore addon whose key is invalid @@ -80,14 +76,19 @@ func (c *cmaManagedByController) sync(ctx context.Context, syncCtx factory.SyncC return err } - // Remove the annotation value "self" since the WithInstallStrategy() is removed in addon-framework. 
+ addon := c.agentAddons[cma.GetName()] + if addon.GetAgentAddonOptions().InstallStrategy == nil { + return nil + } + + // If the addon defines install strategy via WithInstallStrategy(), force add annotation "addon.open-cluster-management.io/lifecycle: self" to cma. + // The annotation with value "self" will be removed when remove WithInstallStrategy() in addon-framework. // The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. cmaCopy := cma.DeepCopy() - if cmaCopy.Annotations == nil || - cmaCopy.Annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] != addonapiv1alpha1.AddonLifecycleSelfManageAnnotationValue { - return nil + if cmaCopy.Annotations == nil { + cmaCopy.Annotations = map[string]string{} } - cmaCopy.Annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] = "" + cmaCopy.Annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] = addonapiv1alpha1.AddonLifecycleSelfManageAnnotationValue _, err = c.addonPatcher.PatchLabelAnnotations(ctx, cmaCopy, cmaCopy.ObjectMeta, cma.ObjectMeta) return err diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go similarity index 89% rename from vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig/controller.go rename to vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go index 55b87768..300caff8 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go @@ -1,4 +1,4 @@ -package cmaconfig +package managementaddonconfig import ( "context" @@ -30,8 +30,8 @@ const ( type enqueueFunc func(obj interface{}) -// cmaConfigController 
reconciles all interested addon config types (GroupVersionResource) on the hub. -type cmaConfigController struct { +// clusterManagementAddonConfigController reconciles all interested addon config types (GroupVersionResource) on the hub. +type clusterManagementAddonConfigController struct { addonClient addonv1alpha1client.Interface clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister clusterManagementAddonIndexer cache.Indexer @@ -44,7 +44,7 @@ type cmaConfigController struct { addonapiv1alpha1.ClusterManagementAddOnStatus] } -func NewCMAConfigController( +func NewManagementAddonConfigController( addonClient addonv1alpha1client.Interface, clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, configInformerFactory dynamicinformer.DynamicSharedInformerFactory, @@ -53,7 +53,7 @@ func NewCMAConfigController( ) factory.Controller { syncCtx := factory.NewSyncContext(controllerName) - c := &cmaConfigController{ + c := &clusterManagementAddonConfigController{ addonClient: addonClient, clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), clusterManagementAddonIndexer: clusterManagementAddonInformers.Informer().GetIndexer(), @@ -78,7 +78,7 @@ func NewCMAConfigController( WithSync(c.sync).ToController(controllerName) } -func (c *cmaConfigController) buildConfigInformers( +func (c *clusterManagementAddonConfigController) buildConfigInformers( configInformerFactory dynamicinformer.DynamicSharedInformerFactory, configGVRs map[schema.GroupVersionResource]bool, ) []factory.Informer { @@ -104,7 +104,7 @@ func (c *cmaConfigController) buildConfigInformers( return configInformers } -func (c *cmaConfigController) enqueueClusterManagementAddOnsByConfig(gvr schema.GroupVersionResource) enqueueFunc { +func (c *clusterManagementAddonConfigController) enqueueClusterManagementAddOnsByConfig(gvr schema.GroupVersionResource) enqueueFunc { return func(obj interface{}) { namespaceName, err := 
cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { @@ -129,7 +129,7 @@ func (c *cmaConfigController) enqueueClusterManagementAddOnsByConfig(gvr schema. } } -func (c *cmaConfigController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { +func (c *clusterManagementAddonConfigController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { _, addonName, err := cache.SplitMetaNamespaceKey(key) if err != nil { // ignore addon whose key is invalid @@ -158,7 +158,7 @@ func (c *cmaConfigController) sync(ctx context.Context, syncCtx factory.SyncCont return err } -func (c *cmaConfigController) updateConfigSpecHash(cma *addonapiv1alpha1.ClusterManagementAddOn) error { +func (c *clusterManagementAddonConfigController) updateConfigSpecHash(cma *addonapiv1alpha1.ClusterManagementAddOn) error { for i, defaultConfigReference := range cma.Status.DefaultConfigReferences { if !utils.ContainGR( @@ -203,7 +203,7 @@ func (c *cmaConfigController) updateConfigSpecHash(cma *addonapiv1alpha1.Cluster return nil } -func (c *cmaConfigController) getConfigSpecHash(gr addonapiv1alpha1.ConfigGroupResource, +func (c *clusterManagementAddonConfigController) getConfigSpecHash(gr addonapiv1alpha1.ConfigGroupResource, cr addonapiv1alpha1.ConfigReferent) (string, error) { lister, ok := c.configListers[schema.GroupResource{Group: gr.Group, Resource: gr.Resource}] if !ok { diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go index 57953b72..b2f3575e 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go @@ -22,14 +22,17 @@ import ( workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" + 
"open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmaconfig" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/cmamanagedby" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddon" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" + "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration" + "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner" "open-cluster-management.io/addon-framework/pkg/utils" ) @@ -238,17 +241,33 @@ func (a *addonManager) StartWithInformers(ctx context.Context, a.addonAgents, ) + addonInstallController := addoninstall.NewAddonInstallController( + addonClient, + clusterInformers.Cluster().V1().ManagedClusters(), + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + a.addonAgents, + ) + // This controller is used during migrating addons to be managed by addon-manager. // This should be removed when the migration is done. // The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. 
- managementAddonController := cmamanagedby.NewCMAManagedByController( + managementAddonController := managementaddon.NewManagementAddonController( addonClient, addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), a.addonAgents, utils.FilterByAddonName(a.addonAgents), ) - var addonConfigController, managementAddonConfigController factory.Controller + // This is a duplicate controller in general addon-manager. This should be removed when we + // always enable the addon-manager + addonOwnerController := addonowner.NewAddonOwnerController( + addonClient, + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + utils.ManagedBySelf(a.addonAgents), + ) + + var addonConfigController, managementAddonConfigController, addonConfigurationController factory.Controller if len(a.addonConfigs) != 0 { addonConfigController = addonconfig.NewAddonConfigController( addonClient, @@ -258,13 +277,24 @@ func (a *addonManager) StartWithInformers(ctx context.Context, a.addonConfigs, utils.FilterByAddonName(a.addonAgents), ) - managementAddonConfigController = cmaconfig.NewCMAConfigController( + managementAddonConfigController = managementaddonconfig.NewManagementAddonConfigController( addonClient, addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), dynamicInformers, a.addonConfigs, utils.FilterByAddonName(a.addonAgents), ) + + // start addonConfiguration controller, note this is to handle the case when the general addon-manager + // is not started, we should consider removing this when the general addon-manager is always started. + // This controller will also ignore the installStrategy part. 
+ addonConfigurationController = addonconfiguration.NewAddonConfigurationController( + addonClient, + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + nil, nil, + utils.ManagedBySelf(a.addonAgents), + ) } var csrApproveController factory.Controller @@ -304,14 +334,19 @@ func (a *addonManager) StartWithInformers(ctx context.Context, go deployController.Run(ctx, 1) go registrationController.Run(ctx, 1) + go addonInstallController.Run(ctx, 1) go managementAddonController.Run(ctx, 1) + go addonOwnerController.Run(ctx, 1) if addonConfigController != nil { go addonConfigController.Run(ctx, 1) } if managementAddonConfigController != nil { go managementAddonConfigController.Run(ctx, 1) } + if addonConfigurationController != nil { + go addonConfigurationController.Run(ctx, 1) + } if csrApproveController != nil { go csrApproveController.Run(ctx, 1) } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go b/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go index 721426cc..5fc8c08a 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go @@ -4,8 +4,11 @@ import ( "fmt" certificatesv1 "k8s.io/api/certificates/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" @@ -46,6 +49,14 @@ type AgentAddonOptions struct { // +optional Registration *RegistrationOption + // InstallStrategy defines that addon should be created in which clusters. 
+ // Addon will not be installed automatically until a ManagedClusterAddon is applied to the cluster's + // namespace if InstallStrategy is nil. + // Deprecated: use installStrategy config in ClusterManagementAddOn API instead + // The migration plan refer to https://github.com/open-cluster-management-io/ocm/issues/355. + // +optional + InstallStrategy *InstallStrategy + // Updaters select a set of resources and define the strategies to update them. // UpdateStrategy is Update if no Updater is defined for a resource. // +optional @@ -63,16 +74,13 @@ type AgentAddonOptions struct { // +optional HostedModeEnabled bool - // HostedModeInfoFunc returns whether an addon is in hosted mode, and its hosting cluster. - HostedModeInfoFunc func(addon *addonapiv1alpha1.ManagedClusterAddOn, cluster *clusterv1.ManagedCluster) (string, string) - // SupportedConfigGVRs is a list of addon supported configuration GroupVersionResource // each configuration GroupVersionResource should be unique SupportedConfigGVRs []schema.GroupVersionResource // AgentDeployTriggerClusterFilter defines the filter func to trigger the agent deploy/redploy when cluster info is // changed. Addons that need information from the ManagedCluster resource when deploying the agent should use this - // field to set what information they need, otherwise the expected/up-to-date agent may be deployed updates since + // field to set what information they need, otherwise the expected/up-to-date agent may be deployed delayed since // the default filter func returns false when the ManagedCluster resource is updated. // // For example, the agentAddon needs information from the ManagedCluster annotation, it can set the filter function @@ -149,6 +157,23 @@ type RegistrationOption struct { CSRSign CSRSignerFunc } +// InstallStrategy is the installation strategy of the manifests prescribed by Manifests(..). 
+type InstallStrategy struct { + *installStrategy +} + +type installStrategy struct { + // InstallNamespace is target deploying namespace in the managed cluster upon automatic addon installation. + InstallNamespace string + + // managedClusterFilter will filter the clusters to install the addon to. + managedClusterFilter func(cluster *clusterv1.ManagedCluster) bool +} + +func (s *InstallStrategy) GetManagedClusterFilter() func(cluster *clusterv1.ManagedCluster) bool { + return s.managedClusterFilter +} + type Updater struct { // ResourceIdentifier sets what resources the strategy applies to ResourceIdentifier workapiv1.ResourceIdentifier @@ -233,6 +258,54 @@ func DefaultGroups(clusterName, addonName string) []string { } } +// InstallAllStrategy indicate to install addon to all clusters +func InstallAllStrategy(installNamespace string) *InstallStrategy { + return &InstallStrategy{ + &installStrategy{ + InstallNamespace: installNamespace, + managedClusterFilter: func(cluster *clusterv1.ManagedCluster) bool { + return true + }, + }, + } +} + +// InstallByLabelStrategy indicate to install addon based on clusters' label +func InstallByLabelStrategy(installNamespace string, selector metav1.LabelSelector) *InstallStrategy { + return &InstallStrategy{ + &installStrategy{ + InstallNamespace: installNamespace, + managedClusterFilter: func(cluster *clusterv1.ManagedCluster) bool { + selector, err := metav1.LabelSelectorAsSelector(&selector) + if err != nil { + klog.Warningf("labels selector is not correct: %v", err) + return false + } + + if !selector.Matches(labels.Set(cluster.Labels)) { + return false + } + return true + }, + }, + } +} + +// InstallByFilterFunctionStrategy indicate to install addon based on a filter function, and it will also install addons if the filter function is nil. 
+func InstallByFilterFunctionStrategy(installNamespace string, f func(cluster *clusterv1.ManagedCluster) bool) *InstallStrategy { + if f == nil { + f = func(cluster *clusterv1.ManagedCluster) bool { + return true + } + } + return &InstallStrategy{ + &installStrategy{ + InstallNamespace: installNamespace, + managedClusterFilter: f, + }, + } +} + // ApprovalAllCSRs returns true for all csrs. func ApprovalAllCSRs(cluster *clusterv1.ManagedCluster, addon *addonapiv1alpha1.ManagedClusterAddOn, csr *certificatesv1.CertificateSigningRequest) bool { return true diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go new file mode 100644 index 00000000..5e38bfde --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go @@ -0,0 +1,114 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type managedClusterAddonConfigurationReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d *managedClusterAddonConfigurationReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + + for _, addon := range graph.getAddonsToUpdate() { + mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs) + err := 
d.patchAddonStatus(ctx, mca, addon.mca) + if err != nil { + errs = append(errs, err) + } + } + + return cma, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *managedClusterAddonConfigurationReconciler) mergeAddonConfig( + mca *addonv1alpha1.ManagedClusterAddOn, desiredConfigMap addonConfigMap) *addonv1alpha1.ManagedClusterAddOn { + mcaCopy := mca.DeepCopy() + + var mergedConfigs []addonv1alpha1.ConfigReference + // remove configs that are not desired + for _, config := range mcaCopy.Status.ConfigReferences { + if _, ok := desiredConfigMap[config.ConfigGroupResource]; ok { + mergedConfigs = append(mergedConfigs, config) + } + } + + // append or update configs + for _, config := range desiredConfigMap { + var match bool + for i := range mergedConfigs { + if mergedConfigs[i].ConfigGroupResource != config.ConfigGroupResource { + continue + } + + match = true + // set LastObservedGeneration to 0 when config name/namespace changes + if mergedConfigs[i].DesiredConfig != nil && (mergedConfigs[i].DesiredConfig.ConfigReferent != config.DesiredConfig.ConfigReferent) { + mergedConfigs[i].LastObservedGeneration = 0 + } + mergedConfigs[i].ConfigReferent = config.ConfigReferent + mergedConfigs[i].DesiredConfig = config.DesiredConfig.DeepCopy() + } + + if !match { + mergedConfigs = append(mergedConfigs, config) + } + } + + mcaCopy.Status.ConfigReferences = mergedConfigs + return mcaCopy +} + +func (d *managedClusterAddonConfigurationReconciler) patchAddonStatus(ctx context.Context, new, old *addonv1alpha1.ManagedClusterAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + oldData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{ + Status: addonv1alpha1.ManagedClusterAddOnStatus{ + Namespace: old.Status.Namespace, + ConfigReferences: old.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: 
new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonv1alpha1.ManagedClusterAddOnStatus{ + Namespace: new.Status.Namespace, + ConfigReferences: new.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go new file mode 100644 index 00000000..a51c304c --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go @@ -0,0 +1,196 @@ +package addonconfiguration + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterlisterv1beta1 
"open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" +) + +// addonConfigurationController is a controller to update configuration of mca with the following order +// 1. use configuration in mca spec if it is set +// 2. use configuration in install strategy +// 3. use configuration in the default configuration in cma +type addonConfigurationController struct { + addonClient addonv1alpha1client.Interface + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + managedClusterAddonIndexer cache.Indexer + addonFilterFunc factory.EventFilterFunc + placementLister clusterlisterv1beta1.PlacementLister + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister + placementDecisionGetter PlacementDecisionGetter + + reconcilers []addonConfigurationReconcile +} + +type addonConfigurationReconcile interface { + reconcile(ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, + graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) +} + +type reconcileState int64 + +const ( + reconcileStop reconcileState = iota + reconcileContinue +) + +func NewAddonConfigurationController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + placementInformer clusterinformersv1beta1.PlacementInformer, + placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + c := &addonConfigurationController{ + addonClient: addonClient, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + managedClusterAddonIndexer: addonInformers.Informer().GetIndexer(), + 
addonFilterFunc: addonFilterFunc, + } + + c.reconcilers = []addonConfigurationReconcile{ + &managedClusterAddonConfigurationReconciler{ + addonClient: addonClient, + }, + &clusterManagementAddonProgressingReconciler{ + addonClient: addonClient, + }, + } + + controllerFactory := factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, + clusterManagementAddonInformers.Informer()).WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()) + + // This is to handle the case the self managed addon-manager does not have placementInformer/placementDecisionInformer. + // we will not consider installStrategy related placement for self managed addon-manager. + if placementInformer != nil && placementDecisionInformer != nil { + controllerFactory = controllerFactory.WithInformersQueueKeysFunc( + index.ClusterManagementAddonByPlacementDecisionQueueKey(clusterManagementAddonInformers), placementDecisionInformer.Informer()). 
+ WithInformersQueueKeysFunc(index.ClusterManagementAddonByPlacementQueueKey(clusterManagementAddonInformers), placementInformer.Informer()) + c.placementLister = placementInformer.Lister() + c.placementDecisionLister = placementDecisionInformer.Lister() + c.placementDecisionGetter = PlacementDecisionGetter{Client: placementDecisionInformer.Lister()} + } + + return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller") +} + +func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + _, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + klog.V(4).Infof("Reconciling addon %q", addonName) + + cma, err := c.clusterManagementAddonLister.Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + if !c.addonFilterFunc(cma) { + return nil + } + + cma = cma.DeepCopy() + graph, err := c.buildConfigurationGraph(cma) + if err != nil { + return err + } + + // generate the rollout result before calling reconcile() + // so that all the reconcilers are using the same rollout result + err = graph.generateRolloutResult() + if err != nil { + return err + } + + var state reconcileState + var errs []error + for _, reconciler := range c.reconcilers { + cma, state, err = reconciler.reconcile(ctx, cma, graph) + if err != nil { + errs = append(errs, err) + } + if state == reconcileStop { + break + } + } + + return utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) buildConfigurationGraph(cma *addonv1alpha1.ClusterManagementAddOn) (*configurationGraph, error) { + graph := newGraph(cma.Spec.SupportedConfigs, cma.Status.DefaultConfigReferences) + addons, err := c.managedClusterAddonIndexer.ByIndex(index.ManagedClusterAddonByName, cma.Name) + if err != nil { + return graph, err + } + + // add all existing addons to the default at first + for _, addonObject := 
range addons { + addon := addonObject.(*addonv1alpha1.ManagedClusterAddOn) + graph.addAddonNode(addon) + } + + if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual { + return graph, nil + } + + // check each install strategy in status + var errs []error + for _, installProgression := range cma.Status.InstallProgressions { + for _, installStrategy := range cma.Spec.InstallStrategy.Placements { + if installStrategy.PlacementRef != installProgression.PlacementRef { + continue + } + + // add placement node + err = graph.addPlacementNode(installStrategy, installProgression, c.placementLister, c.placementDecisionGetter) + if err != nil { + errs = append(errs, err) + continue + } + } + } + + return graph, utilerrors.NewAggregate(errs) +} + +type PlacementDecisionGetter struct { + Client clusterlister.PlacementDecisionLister +} + +func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) { + return pdl.Client.PlacementDecisions(namespace).List(selector) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go new file mode 100644 index 00000000..d0b381e2 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go @@ -0,0 +1,417 @@ +package addonconfiguration + +import ( + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + clusterv1sdkalpha1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1" 
+ clustersdkv1beta1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1" +) + +// configurationGraph is a 2 level snapshot tree on the configuration of addons +// the first level is a list of nodes that represents an install strategy and a desired configuration for this install +// strategy. The second level is a list of nodes that represent each mca and its desired configuration +type configurationGraph struct { + // nodes maintains a list between an installStrategy and its related mcas + nodes []*installStrategyNode + // defaults is the nodes with no install strategy + defaults *installStrategyNode +} + +// installStrategyNode is a node in configurationGraph defined by an install strategy +type installStrategyNode struct { + placementRef addonv1alpha1.PlacementRef + pdTracker *clustersdkv1beta1.PlacementDecisionClustersTracker + rolloutStrategy clusterv1alpha1.RolloutStrategy + rolloutResult clusterv1sdkalpha1.RolloutResult + desiredConfigs addonConfigMap + // children keeps a map of addons node as the children of this node + children map[string]*addonNode + clusters sets.Set[string] +} + +// addonNode is a node, as a child of an installStrategy node, representing an mca +// addonnode +type addonNode struct { + desiredConfigs addonConfigMap + mca *addonv1alpha1.ManagedClusterAddOn + status *clusterv1sdkalpha1.ClusterRolloutStatus +} + +type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference + +// set addon rollout status +func (n *addonNode) setRolloutStatus() { + n.status = &clusterv1sdkalpha1.ClusterRolloutStatus{ClusterName: n.mca.Namespace} + + // desired configs doesn't match actual configs, set to ToApply + if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) { + n.status.Status = clusterv1sdkalpha1.ToApply + return + } + + var progressingCond metav1.Condition + for _, cond := range n.mca.Status.Conditions { + if cond.Type == addonv1alpha1.ManagedClusterAddOnConditionProgressing { + progressingCond = cond + break + } + } 
+ + for _, actual := range n.mca.Status.ConfigReferences { + if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok { + // desired config spec hash doesn't match actual, set to ToApply + if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) { + n.status.Status = clusterv1sdkalpha1.ToApply + return + // desired config spec hash matches actual, but last applied config spec hash doesn't match actual + } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) { + switch progressingCond.Reason { + case addonv1alpha1.ProgressingReasonInstallFailed, addonv1alpha1.ProgressingReasonUpgradeFailed: + n.status.Status = clusterv1sdkalpha1.Failed + n.status.LastTransitionTime = &progressingCond.LastTransitionTime + case addonv1alpha1.ProgressingReasonInstalling, addonv1alpha1.ProgressingReasonUpgrading: + n.status.Status = clusterv1sdkalpha1.Progressing + n.status.LastTransitionTime = &progressingCond.LastTransitionTime + default: + n.status.Status = clusterv1sdkalpha1.Progressing + } + return + } + } else { + n.status.Status = clusterv1sdkalpha1.ToApply + return + } + } + + // succeed + n.status.Status = clusterv1sdkalpha1.Succeeded + if progressingCond.Reason == addonv1alpha1.ProgressingReasonInstallSucceed || progressingCond.Reason == addonv1alpha1.ProgressingReasonUpgradeSucceed { + n.status.LastTransitionTime = &progressingCond.LastTransitionTime + } +} + +func (d addonConfigMap) copy() addonConfigMap { + output := addonConfigMap{} + for k, v := range d { + output[k] = v + } + return output +} + +func newGraph(supportedConfigs []addonv1alpha1.ConfigMeta, defaultConfigReferences []addonv1alpha1.DefaultConfigReference) *configurationGraph { + graph := &configurationGraph{ + nodes: []*installStrategyNode{}, + defaults: &installStrategyNode{ + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{}, + children: map[string]*addonNode{}, + }, + } + + // init graph.defaults.desiredConfigs 
with supportedConfigs + for _, config := range supportedConfigs { + if config.DefaultConfig != nil { + graph.defaults.desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: *config.DefaultConfig, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: *config.DefaultConfig, + }, + } + } + } + // copy the spechash from cma status defaultConfigReferences + for _, configRef := range defaultConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + defaultsDesiredConfig, ok := graph.defaults.desiredConfigs[configRef.ConfigGroupResource] + if ok && (defaultsDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + defaultsDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + + return graph +} + +// addAddonNode to the graph, starting from placement with the highest order +func (g *configurationGraph) addAddonNode(mca *addonv1alpha1.ManagedClusterAddOn) { + for i := len(g.nodes) - 1; i >= 0; i-- { + if g.nodes[i].clusters.Has(mca.Namespace) { + g.nodes[i].addNode(mca) + return + } + } + + g.defaults.addNode(mca) +} + +// addPlacementNode deletes clusters on the existing graph so the new configuration overrides the previous +func (g *configurationGraph) addPlacementNode( + installStrategy addonv1alpha1.PlacementStrategy, + installProgression addonv1alpha1.InstallProgression, + placementLister clusterlisterv1beta1.PlacementLister, + placementDecisionGetter PlacementDecisionGetter, +) error { + placementRef := installProgression.PlacementRef + installConfigReference := installProgression.ConfigReferences + + // get placement + if placementLister == nil { + return fmt.Errorf("invalid placement lister %v", placementLister) + } + placement, err := placementLister.Placements(placementRef.Namespace).Get(placementRef.Name) + if err != nil { + return err + } + + // new decision tracker + pdTracker := 
clustersdkv1beta1.NewPlacementDecisionClustersTracker(placement, placementDecisionGetter, nil) + + // refresh and get existing decision clusters + err = pdTracker.Refresh() + if err != nil { + return err + } + clusters := pdTracker.ExistingClusterGroupsBesides().GetClusters() + + node := &installStrategyNode{ + placementRef: placementRef, + pdTracker: pdTracker, + rolloutStrategy: installStrategy.RolloutStrategy, + desiredConfigs: g.defaults.desiredConfigs, + children: map[string]*addonNode{}, + clusters: clusters, + } + + // Set MaxConcurrency + // If progressive strategy is not initialized or MaxConcurrency is not specified, set MaxConcurrency to the default value + if node.rolloutStrategy.Type == clusterv1alpha1.Progressive { + progressiveStrategy := node.rolloutStrategy.Progressive + + if progressiveStrategy == nil { + progressiveStrategy = &clusterv1alpha1.RolloutProgressive{} + } + if progressiveStrategy.MaxConcurrency.StrVal == "" && progressiveStrategy.MaxConcurrency.IntVal == 0 { + progressiveStrategy.MaxConcurrency = placement.Spec.DecisionStrategy.GroupStrategy.ClustersPerDecisionGroup + } + + node.rolloutStrategy.Progressive = progressiveStrategy + } + + // overrides configuration by install strategy + if len(installConfigReference) > 0 { + node.desiredConfigs = node.desiredConfigs.copy() + for _, configRef := range installConfigReference { + if configRef.DesiredConfig == nil { + continue + } + node.desiredConfigs[configRef.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: configRef.ConfigGroupResource, + ConfigReferent: configRef.DesiredConfig.ConfigReferent, + DesiredConfig: configRef.DesiredConfig.DeepCopy(), + } + } + } + + // remove addon in defaults and other placements. 
+ for _, cluster := range node.clusters.UnsortedList() { + if _, ok := g.defaults.children[cluster]; ok { + node.addNode(g.defaults.children[cluster].mca) + delete(g.defaults.children, cluster) + } + for _, placementNode := range g.nodes { + if _, ok := placementNode.children[cluster]; ok { + node.addNode(placementNode.children[cluster].mca) + delete(placementNode.children, cluster) + } + } + } + g.nodes = append(g.nodes, node) + return nil +} + +func (g *configurationGraph) generateRolloutResult() error { + for _, node := range g.nodes { + if err := node.generateRolloutResult(); err != nil { + return err + } + } + if err := g.defaults.generateRolloutResult(); err != nil { + return err + } + return nil +} + +func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]*installStrategyNode { + placementNodeMap := map[addonv1alpha1.PlacementRef]*installStrategyNode{} + for _, node := range g.nodes { + placementNodeMap[node.placementRef] = node + } + + return placementNodeMap +} + +func (g *configurationGraph) getAddonsToUpdate() []*addonNode { + var addons []*addonNode + for _, node := range g.nodes { + addons = append(addons, node.getAddonsToUpdate()...) + } + + addons = append(addons, g.defaults.getAddonsToUpdate()...) + + return addons +} + +func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn) { + n.children[addon.Namespace] = &addonNode{ + mca: addon, + desiredConfigs: n.desiredConfigs, + } + + // override configuration by mca spec + if len(addon.Spec.Configs) > 0 { + n.children[addon.Namespace].desiredConfigs = n.children[addon.Namespace].desiredConfigs.copy() + // TODO we should also filter out the configs which are not supported configs. 
+ for _, config := range addon.Spec.Configs { + n.children[addon.Namespace].desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: config.ConfigReferent, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: config.ConfigReferent, + }, + } + // copy the spechash from mca status + for _, configRef := range addon.Status.ConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + nodeDesiredConfig, ok := n.children[addon.Namespace].desiredConfigs[configRef.ConfigGroupResource] + if ok && (nodeDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + nodeDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + } + } + + // set addon node rollout status + n.children[addon.Namespace].setRolloutStatus() +} + +func (n *installStrategyNode) generateRolloutResult() error { + if n.placementRef.Name == "" { + // default addons + rolloutResult := clusterv1sdkalpha1.RolloutResult{} + rolloutResult.ClustersToRollout = []clusterv1sdkalpha1.ClusterRolloutStatus{} + for name, addon := range n.children { + if addon.status == nil { + return fmt.Errorf("failed to get rollout status on cluster %v", name) + } + if addon.status.Status != clusterv1sdkalpha1.Succeeded { + rolloutResult.ClustersToRollout = append(rolloutResult.ClustersToRollout, *addon.status) + } + } + n.rolloutResult = rolloutResult + } else { + // placement addons + rolloutHandler, err := clusterv1sdkalpha1.NewRolloutHandler(n.pdTracker, getClusterRolloutStatus) + if err != nil { + return err + } + + // get existing addons + existingRolloutClusters := []clusterv1sdkalpha1.ClusterRolloutStatus{} + for name, addon := range n.children { + clsRolloutStatus, err := getClusterRolloutStatus(name, addon) + if err != nil { + return err + } + existingRolloutClusters = append(existingRolloutClusters, clsRolloutStatus) + } + + // sort by cluster name + 
sort.SliceStable(existingRolloutClusters, func(i, j int) bool { + return existingRolloutClusters[i].ClusterName < existingRolloutClusters[j].ClusterName + }) + + _, rolloutResult, err := rolloutHandler.GetRolloutCluster(n.rolloutStrategy, existingRolloutClusters) + if err != nil { + return err + } + n.rolloutResult = rolloutResult + } + + return nil +} + +// addonToUpdate finds the addons to be updated by placement +func (n *installStrategyNode) getAddonsToUpdate() []*addonNode { + var addons []*addonNode + var clusters []string + + // get addon to update from rollout result + for _, c := range n.rolloutResult.ClustersToRollout { + if _, exist := n.children[c.ClusterName]; exist { + clusters = append(clusters, c.ClusterName) + } + } + + // sort addons by name + sort.Strings(clusters) + for _, k := range clusters { + addons = append(addons, n.children[k]) + } + return addons +} + +func (n *installStrategyNode) countAddonUpgradeSucceed() int { + count := 0 + for _, addon := range n.children { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1sdkalpha1.Succeeded { + count += 1 + } + } + return count +} + +func (n *installStrategyNode) countAddonUpgrading() int { + count := 0 + for _, addon := range n.children { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1sdkalpha1.Progressing { + count += 1 + } + } + return count +} + +func (n *installStrategyNode) countAddonTimeOut() int { + return len(n.rolloutResult.ClustersTimeOut) +} + +func getClusterRolloutStatus(clusterName string, addonNode *addonNode) (clusterv1sdkalpha1.ClusterRolloutStatus, error) { + if addonNode.status == nil { + return clusterv1sdkalpha1.ClusterRolloutStatus{}, fmt.Errorf("failed to get rollout status on cluster %v", clusterName) + } + return *addonNode.status, nil +} + +func desiredConfigsEqual(a, b addonConfigMap) bool { + if len(a) != len(b) { + return false + } + + for configgrA := range a { + 
if a[configgrA] != b[configgrA] { + return false + } + } + + return true +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go new file mode 100644 index 00000000..36d71238 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go @@ -0,0 +1,143 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type clusterManagementAddonProgressingReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d *clusterManagementAddonProgressingReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + cmaCopy := cma.DeepCopy() + placementNodes := graph.getPlacementNodes() + + // go through addons and update condition per install progression + for i, installProgression := range cmaCopy.Status.InstallProgressions { + placementNode, exist := placementNodes[installProgression.PlacementRef] + if !exist { + continue + } + + isUpgrade := false + + for _, configReference := range installProgression.ConfigReferences { + if configReference.LastAppliedConfig != nil { + isUpgrade = true + break + } + } + + 
setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i], + isUpgrade, + placementNode.countAddonUpgrading(), + placementNode.countAddonUpgradeSucceed(), + placementNode.countAddonTimeOut(), + len(placementNode.clusters), + ) + } + + err := d.patchMgmtAddonStatus(ctx, cmaCopy, cma) + if err != nil { + errs = append(errs, err) + } + return cmaCopy, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *clusterManagementAddonProgressingReconciler) patchMgmtAddonStatus(ctx context.Context, new, old *addonv1alpha1.ClusterManagementAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + oldData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: old.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: new.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching clustermanagementaddon %s status with %s", new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} + +func setAddOnInstallProgressionsAndLastApplied( + installProgression *addonv1alpha1.InstallProgression, + isUpgrade bool, + progressing, done, timeout, total int) { + // always update progressing condition when there is no config + // skip update progressing condition when last applied config already the same as desired + skip := 
len(installProgression.ConfigReferences) > 0 + for _, configReference := range installProgression.ConfigReferences { + if !equality.Semantic.DeepEqual(configReference.LastAppliedConfig, configReference.DesiredConfig) && + !equality.Semantic.DeepEqual(configReference.LastKnownGoodConfig, configReference.DesiredConfig) { + skip = false + } + } + if skip { + return + } + condition := metav1.Condition{ + Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, + } + if (total == 0 && done == 0) || (done != total) { + condition.Status = metav1.ConditionTrue + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgrading + condition.Message = fmt.Sprintf("%d/%d upgrading..., %d timeout.", progressing+done, total, timeout) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstalling + condition.Message = fmt.Sprintf("%d/%d installing..., %d timeout.", progressing+done, total, timeout) + } + } else { + for i, configRef := range installProgression.ConfigReferences { + installProgression.ConfigReferences[i].LastAppliedConfig = configRef.DesiredConfig.DeepCopy() + installProgression.ConfigReferences[i].LastKnownGoodConfig = configRef.DesiredConfig.DeepCopy() + } + condition.Status = metav1.ConditionFalse + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed + condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors, %d timeout.", done, total, timeout) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed + condition.Message = fmt.Sprintf("%d/%d install completed with no errors, %d timeout.", done, total, timeout) + } + } + meta.SetStatusCondition(&installProgression.Conditions, condition) +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go new file mode 100644 index 00000000..beff5b4b --- /dev/null +++ 
b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go @@ -0,0 +1,100 @@ +package addonowner + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +const UnsupportedConfigurationType = "UnsupportedConfiguration" + +// addonOwnerController reconciles instances of managedclusteradd on the hub +// to add related ClusterManagementAddon as the owner. 
+type addonOwnerController struct { + addonClient addonv1alpha1client.Interface + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + addonFilterFunc factory.EventFilterFunc +} + +func NewAddonOwnerController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + c := &addonOwnerController{ + addonClient: addonClient, + managedClusterAddonLister: addonInformers.Lister(), + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + addonFilterFunc: addonFilterFunc, + } + + return factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, clusterManagementAddonInformers.Informer()). 
+ WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()).WithSync(c.sync).ToController("addon-owner-controller") +} + +func (c *addonOwnerController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + klog.V(4).Infof("Reconciling addon %q", key) + + namespace, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(namespace).Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + addonCopy := addon.DeepCopy() + modified := false + + clusterManagementAddon, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + if !c.addonFilterFunc(clusterManagementAddon) { + return nil + } + + owner := metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) + modified = utils.MergeOwnerRefs(&addonCopy.OwnerReferences, *owner, false) + if modified { + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Update(ctx, addonCopy, metav1.UpdateOptions{}) + return err + } + + return nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/rollout.go b/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/rollout.go new file mode 100644 index 00000000..23e04c47 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1/rollout.go @@ -0,0 +1,616 @@ +package v1alpha1 + +import ( + "fmt" + "math" + "regexp" + "sort" + "strconv" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/clock" + clusterv1alpha1 
"open-cluster-management.io/api/cluster/v1alpha1" + clusterv1beta1sdk "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1" +) + +var RolloutClock = clock.Clock(clock.RealClock{}) +var maxTimeDuration = time.Duration(math.MaxInt64) + +// RolloutStatus represents the status of a rollout operation. +type RolloutStatus int + +const ( + // ToApply indicates that the resource's desired status has not been applied yet. + ToApply RolloutStatus = iota + // Progressing indicates that the resource's desired status is applied and last applied status is not updated. + Progressing + // Succeeded indicates that the resource's desired status is applied and last applied status is successful. + Succeeded + // Failed indicates that the resource's desired status is applied and last applied status has failed. + Failed + // TimeOut indicates that the rollout status is progressing or failed and the status remains + // for longer than the timeout, resulting in a timeout status. + TimeOut + // Skip indicates that the rollout should be skipped on this cluster. + Skip +) + +// ClusterRolloutStatus holds the rollout status information for a cluster. +type ClusterRolloutStatus struct { + // cluster name + ClusterName string + // GroupKey represents the cluster group key (optional field). + GroupKey clusterv1beta1sdk.GroupKey + // Status is the required field indicating the rollout status. + Status RolloutStatus + // LastTransitionTime is the last transition time of the rollout status (optional field). + // Used to calculate timeout for progressing and failed status and minimum success time (i.e. soak + // time) for succeeded status. + LastTransitionTime *metav1.Time + // TimeOutTime is the timeout time when the status is progressing or failed (optional field). + TimeOutTime *metav1.Time +} + +// RolloutResult contains list of clusters that are timeOut, removed and required to rollOut. 
A +// boolean is also provided signaling that the rollout may be shortened due to the number of failed +// clusters exceeding the MaxFailure threshold. +type RolloutResult struct { + // ClustersToRollout is a slice of ClusterRolloutStatus that will be rolled out. + ClustersToRollout []ClusterRolloutStatus + // ClustersTimeOut is a slice of ClusterRolloutStatus that are timeout. + ClustersTimeOut []ClusterRolloutStatus + // ClustersRemoved is a slice of ClusterRolloutStatus that are removed. + ClustersRemoved []ClusterRolloutStatus + // MaxFailureBreach is a boolean signaling whether the rollout was cut short because of failed clusters. + MaxFailureBreach bool + // RecheckAfter is the time duration to recheck the rollout status. + RecheckAfter *time.Duration +} + +// ClusterRolloutStatusFunc defines a function that return the rollout status for a given workload. +type ClusterRolloutStatusFunc[T any] func(clusterName string, workload T) (ClusterRolloutStatus, error) + +// The RolloutHandler required workload type (interface/struct) to be assigned to the generic type. +// The custom implementation of the ClusterRolloutStatusFunc is required to use the RolloutHandler. +type RolloutHandler[T any] struct { + // placement decision tracker + pdTracker *clusterv1beta1sdk.PlacementDecisionClustersTracker + statusFunc ClusterRolloutStatusFunc[T] +} + +// NewRolloutHandler creates a new RolloutHandler with the given workload type. +func NewRolloutHandler[T any](pdTracker *clusterv1beta1sdk.PlacementDecisionClustersTracker, statusFunc ClusterRolloutStatusFunc[T]) (*RolloutHandler[T], error) { + if pdTracker == nil { + return nil, fmt.Errorf("invalid placement decision tracker %v", pdTracker) + } + + return &RolloutHandler[T]{pdTracker: pdTracker, statusFunc: statusFunc}, nil +} + +// The inputs are a RolloutStrategy and existingClusterRolloutStatus list. 
+// The existing ClusterRolloutStatus list should be created using the ClusterRolloutStatusFunc to determine the current workload rollout status. +// The existing ClusterRolloutStatus list should contain all the current workloads rollout status such as ToApply, Progressing, Succeeded, +// Failed, TimeOut and Skip in order to determine the added, removed, timeout clusters and next clusters to rollout. +// +// Return the actual RolloutStrategy that take effect and a RolloutResult contain list of ClusterToRollout, ClustersTimeout and ClusterRemoved. +func (r *RolloutHandler[T]) GetRolloutCluster(rolloutStrategy clusterv1alpha1.RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*clusterv1alpha1.RolloutStrategy, RolloutResult, error) { + switch rolloutStrategy.Type { + case clusterv1alpha1.All: + return r.getRolloutAllClusters(rolloutStrategy, existingClusterStatus) + case clusterv1alpha1.Progressive: + return r.getProgressiveClusters(rolloutStrategy, existingClusterStatus) + case clusterv1alpha1.ProgressivePerGroup: + return r.getProgressivePerGroupClusters(rolloutStrategy, existingClusterStatus) + default: + return nil, RolloutResult{}, fmt.Errorf("incorrect rollout strategy type %v", rolloutStrategy.Type) + } +} + +func (r *RolloutHandler[T]) getRolloutAllClusters(rolloutStrategy clusterv1alpha1.RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*clusterv1alpha1.RolloutStrategy, RolloutResult, error) { + // Prepare the rollout strategy + strategy := clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All} + strategy.All = rolloutStrategy.All.DeepCopy() + if strategy.All == nil { + strategy.All = &clusterv1alpha1.RolloutAll{} + } + + // Parse timeout for the rollout + failureTimeout, err := parseTimeout(strategy.All.ProgressDeadline) + if err != nil { + return &strategy, RolloutResult{}, err + } + + allClusterGroups := r.pdTracker.ExistingClusterGroupsBesides() + allClusters := allClusterGroups.GetClusters().UnsortedList() + + // Check for 
removed Clusters + currentClusterStatus, removedClusterStatus := r.getRemovedClusters(allClusterGroups, existingClusterStatus) + rolloutResult := progressivePerCluster(allClusterGroups, len(allClusters), len(allClusters), time.Duration(0), failureTimeout, currentClusterStatus) + rolloutResult.ClustersRemoved = removedClusterStatus + + return &strategy, rolloutResult, nil +} + +func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy clusterv1alpha1.RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*clusterv1alpha1.RolloutStrategy, RolloutResult, error) { + // Prepare the rollout strategy + strategy := clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.Progressive} + strategy.Progressive = rolloutStrategy.Progressive.DeepCopy() + if strategy.Progressive == nil { + strategy.Progressive = &clusterv1alpha1.RolloutProgressive{} + } + minSuccessTime := strategy.Progressive.MinSuccessTime.Duration + + // Parse timeout for non-mandatory decision groups + failureTimeout, err := parseTimeout(strategy.Progressive.ProgressDeadline) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Check for removed clusters + clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() + currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) + + // Parse maximum failure threshold for continuing the rollout, defaulting to zero + maxFailures, err := calculateRolloutSize(strategy.Progressive.MaxFailures, len(clusterGroups.GetClusters()), 0) + if err != nil { + return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxFailures: %w", err) + } + + // Upgrade mandatory decision groups first + groupKeys := decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups) + clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) 
+ + // Perform progressive rollOut for mandatory decision groups first, tolerating no failures + if len(clusterGroups) > 0 { + rolloutResult := progressivePerGroup( + clusterGroups, intstr.FromInt32(0), minSuccessTime, failureTimeout, currentClusterStatus, + ) + if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { + rolloutResult.ClustersRemoved = removedClusterStatus + return &strategy, rolloutResult, nil + } + } + + // Calculate the size of progressive rollOut + // If the MaxConcurrency not defined, total clusters length is considered as maxConcurrency. + clusterGroups = r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) + rolloutSize, err := calculateRolloutSize(strategy.Progressive.MaxConcurrency, len(clusterGroups.GetClusters()), len(clusterGroups.GetClusters())) + if err != nil { + return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxConcurrency: %w", err) + } + + // Rollout the remaining clusters + rolloutResult := progressivePerCluster(clusterGroups, rolloutSize, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus) + rolloutResult.ClustersRemoved = removedClusterStatus + + return &strategy, rolloutResult, nil +} + +func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy clusterv1alpha1.RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*clusterv1alpha1.RolloutStrategy, RolloutResult, error) { + // Prepare the rollout strategy + strategy := clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.ProgressivePerGroup} + strategy.ProgressivePerGroup = rolloutStrategy.ProgressivePerGroup.DeepCopy() + if strategy.ProgressivePerGroup == nil { + strategy.ProgressivePerGroup = &clusterv1alpha1.RolloutProgressivePerGroup{} + } + minSuccessTime := strategy.ProgressivePerGroup.MinSuccessTime.Duration + maxFailures := strategy.ProgressivePerGroup.MaxFailures + + // Parse timeout for non-mandatory decision groups + failureTimeout, err := 
parseTimeout(strategy.ProgressivePerGroup.ProgressDeadline) + if err != nil { + return &strategy, RolloutResult{}, err + } + + // Check format of MaxFailures--this value will be re-parsed and used in progressivePerGroup() + err = parseRolloutSize(maxFailures) + if err != nil { + return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxFailures: %w", err) + } + + // Check for removed Clusters + clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() + currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) + + // Upgrade mandatory decision groups first + mandatoryDecisionGroups := strategy.ProgressivePerGroup.MandatoryDecisionGroups.MandatoryDecisionGroups + groupKeys := decisionGroupsToGroupKeys(mandatoryDecisionGroups) + clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) + + // Perform progressive rollout per group for mandatory decision groups first, tolerating no failures + if len(clusterGroups) > 0 { + rolloutResult := progressivePerGroup(clusterGroups, intstr.FromInt32(0), minSuccessTime, failureTimeout, currentClusterStatus) + + if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { + rolloutResult.ClustersRemoved = removedClusterStatus + return &strategy, rolloutResult, nil + } + } + + // RollOut the rest of the decision groups + restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) 
+ + // Perform progressive rollout per group for the remaining decision groups + rolloutResult := progressivePerGroup(restClusterGroups, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus) + rolloutResult.ClustersRemoved = removedClusterStatus + + return &strategy, rolloutResult, nil +} + +func (r *RolloutHandler[T]) getRemovedClusters(clusterGroupsMap clusterv1beta1sdk.ClusterGroupsMap, existingClusterStatus []ClusterRolloutStatus) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { + var currentClusterStatus, removedClusterStatus []ClusterRolloutStatus + + clusters := clusterGroupsMap.GetClusters().UnsortedList() + for _, clusterStatus := range existingClusterStatus { + exist := false + for _, cluster := range clusters { + if clusterStatus.ClusterName == cluster { + exist = true + currentClusterStatus = append(currentClusterStatus, clusterStatus) + break + } + } + + if !exist { + removedClusterStatus = append(removedClusterStatus, clusterStatus) + } + } + return currentClusterStatus, removedClusterStatus +} + +// progressivePerCluster parses the rollout status for the given clusters and returns the rollout +// result. It sorts the clusters alphabetically in order to determine the rollout groupings and the +// rollout group size is determined by the MaxConcurrency setting. 
+func progressivePerCluster( + clusterGroupsMap clusterv1beta1sdk.ClusterGroupsMap, + rolloutSize int, + maxFailures int, + minSuccessTime time.Duration, + timeout time.Duration, + existingClusterStatus []ClusterRolloutStatus, +) RolloutResult { + var rolloutClusters, timeoutClusters []ClusterRolloutStatus + existingClusters := make(map[string]bool) + failureCount := 0 + failureBreach := false + + // Sort existing cluster status for consistency in case ToApply was determined by the workload applier + sort.Slice(existingClusterStatus, func(i, j int) bool { + return existingClusterStatus[i].ClusterName < existingClusterStatus[j].ClusterName + }) + + // Collect existing cluster status and determine any TimeOut statuses + for _, status := range existingClusterStatus { + if status.ClusterName == "" { + continue + } + + existingClusters[status.ClusterName] = true + + // If there was a breach of MaxFailures, only handle clusters that have already had workload applied + if !failureBreach || failureBreach && status.Status != ToApply { + // For progress per cluster, the length of existing `rolloutClusters` will be compared with the + // target rollout size to determine whether to return or not first. + // The timeoutClusters, as well as failed clusters will be counted into failureCount, the next rollout + // will stop if failureCount > maxFailures. 
+ rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters) + } + + // Keep track of TimeOut or Failed clusters and check total against MaxFailures + if status.Status == TimeOut || status.Status == Failed { + failureCount++ + + failureBreach = failureCount > maxFailures + } + + // Return if the list of exsiting rollout clusters has reached the target rollout size + if len(rolloutClusters) >= rolloutSize { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, + RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), + } + } + } + + // Return if the exsiting rollout clusters maxFailures is breached. + if failureBreach { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, + RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), + } + } + + clusters := clusterGroupsMap.GetClusters().UnsortedList() + clusterToGroupKey := clusterGroupsMap.ClusterToGroupKey() + + // Sort the clusters in alphabetical order to ensure consistency. 
+ sort.Strings(clusters) + + // Amend clusters to the rollout up to the rollout size + for _, cluster := range clusters { + if existingClusters[cluster] { + continue + } + + // For clusters without a rollout status, set the status to ToApply + status := ClusterRolloutStatus{ + ClusterName: cluster, + Status: ToApply, + GroupKey: clusterToGroupKey[cluster], + } + rolloutClusters = append(rolloutClusters, status) + + // Return if the list of rollout clusters has reached the target rollout size + if len(rolloutClusters) >= rolloutSize { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), + } + } + } + + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), + } +} + +func progressivePerGroup( + clusterGroupsMap clusterv1beta1sdk.ClusterGroupsMap, + maxFailures intstr.IntOrString, + minSuccessTime time.Duration, + timeout time.Duration, + existingClusterStatus []ClusterRolloutStatus, +) RolloutResult { + var rolloutClusters, timeoutClusters []ClusterRolloutStatus + existingClusters := make(map[string]RolloutStatus) + + // Collect existing cluster status and determine any TimeOut statuses + for _, status := range existingClusterStatus { + if status.ClusterName == "" { + continue + } + + // ToApply will be reconsidered in the decisionGroups iteration. + if status.Status != ToApply { + // For progress per group, the existing rollout clusters and timeout clusters status will be recorded in existingClusters first, + then go through group by group. 
+ rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters) + existingClusters[status.ClusterName] = status.Status + } + } + + totalFailureCount := 0 + failureBreach := false + clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys() + for _, key := range clusterGroupKeys { + groupFailureCount := 0 + if subclusters, ok := clusterGroupsMap[key]; ok { + // Calculate the max failure threshold for the group--the returned error was checked + // previously, so it's ignored here + maxGroupFailures, _ := calculateRolloutSize(maxFailures, len(subclusters), 0) + // Iterate through clusters in the group + clusters := subclusters.UnsortedList() + sort.Strings(clusters) + for _, cluster := range clusters { + if status, ok := existingClusters[cluster]; ok { + // Keep track of TimeOut or Failed clusters and check total against MaxFailures + if status == TimeOut || status == Failed { + groupFailureCount++ + + failureBreach = groupFailureCount > maxGroupFailures + } + + continue + } + + status := ClusterRolloutStatus{ + ClusterName: cluster, + Status: ToApply, + GroupKey: key, + } + rolloutClusters = append(rolloutClusters, status) + } + + totalFailureCount += groupFailureCount + + // As it is perGroup, return if there are clusters to rollOut that aren't + // Failed/Timeout, or there was a breach of the MaxFailure configuration + if len(rolloutClusters)-totalFailureCount > 0 || failureBreach { + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, + RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), + } + } + } + } + + return RolloutResult{ + ClustersToRollout: rolloutClusters, + ClustersTimeOut: timeoutClusters, + MaxFailureBreach: failureBreach, + RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), + } +} + +// determineRolloutStatus checks whether a cluster should continue its rollout based on its current +// 
status and timeout. The function updates the cluster status and appends it to the expected slice. +// Nothing is done for TimeOut or Skip statuses. +// +// The minSuccessTime parameter is utilized for handling succeeded clusters that are still within +// the configured soak time, in which case the cluster will be returned as a rolloutCluster. +// +// The timeout parameter is utilized for handling progressing and failed statuses and any other +// unknown status: +// 1. If timeout is set to None (maxTimeDuration), the function will append the clusterStatus to +// the rollOut Clusters. +// 2. If timeout is set to 0, the function append the clusterStatus to the timeOut clusters. +func determineRolloutStatus( + status *ClusterRolloutStatus, + minSuccessTime time.Duration, + timeout time.Duration, + rolloutClusters []ClusterRolloutStatus, + timeoutClusters []ClusterRolloutStatus, +) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { + + switch status.Status { + case ToApply: + rolloutClusters = append(rolloutClusters, *status) + case Succeeded: + // If the cluster succeeded but is still within the MinSuccessTime (i.e. "soak" time), + // still add it to the list of rolloutClusters + minSuccessTimeTime := getTimeOutTime(status.LastTransitionTime, minSuccessTime) + if RolloutClock.Now().Before(minSuccessTimeTime.Time) { + rolloutClusters = append(rolloutClusters, *status) + } + + return rolloutClusters, timeoutClusters + case TimeOut, Skip: + return rolloutClusters, timeoutClusters + default: // For progressing, failed, or unknown status. 
+ timeOutTime := getTimeOutTime(status.LastTransitionTime, timeout) + status.TimeOutTime = timeOutTime + // check if current time is before the timeout time + if timeOutTime == nil || RolloutClock.Now().Before(timeOutTime.Time) { + rolloutClusters = append(rolloutClusters, *status) + } else { + status.Status = TimeOut + timeoutClusters = append(timeoutClusters, *status) + } + } + + return rolloutClusters, timeoutClusters +} + +// getTimeOutTime calculates the timeout time given a start time and duration, instantiating the +// RolloutClock if a start time isn't provided. +func getTimeOutTime(startTime *metav1.Time, timeout time.Duration) *metav1.Time { + var timeoutTime time.Time + // if timeout is not set (default to maxTimeDuration), the timeout time should not be set + if timeout == maxTimeDuration { + return nil + } + if startTime == nil { + timeoutTime = RolloutClock.Now().Add(timeout) + } else { + timeoutTime = startTime.Add(timeout) + } + return &metav1.Time{Time: timeoutTime} +} + +// calculateRolloutSize calculates the maximum portion from a total number of clusters by parsing a +// maximum threshold value that can be either a quantity or a percent, returning an error if the +// threshold can't be parsed to either of those. 
+func calculateRolloutSize(maxThreshold intstr.IntOrString, total int, defaultThreshold int) (int, error) { + length := defaultThreshold + + // Verify the format of the IntOrString value + err := parseRolloutSize(maxThreshold) + if err != nil { + return length, err + } + + // Calculate the rollout size--errors are ignored because + // they were handled in parseRolloutSize() previously + switch maxThreshold.Type { + case intstr.Int: + length = maxThreshold.IntValue() + case intstr.String: + str := maxThreshold.StrVal + f, _ := strconv.ParseFloat(str[:len(str)-1], 64) + length = int(math.Ceil(f / 100 * float64(total))) + } + + if length <= 0 || length > total { + length = defaultThreshold + } + + return length, nil +} + +// parseRolloutSize parses a maximum threshold value that can be either a quantity or a percent, +// returning an error if the threshold can't be parsed to either of those. +func parseRolloutSize(maxThreshold intstr.IntOrString) error { + + switch maxThreshold.Type { + case intstr.Int: + break + case intstr.String: + str := maxThreshold.StrVal + if strings.HasSuffix(str, "%") { + _, err := strconv.ParseFloat(str[:len(str)-1], 64) + if err != nil { + return err + } + } else { + return fmt.Errorf("'%s' is an invalid maximum threshold value: string is not a percentage", str) + } + default: + return fmt.Errorf("invalid maximum threshold type %+v", maxThreshold.Type) + } + + return nil +} + +// ParseTimeout will return the maximum possible duration given "None", an empty string, or an +// invalid duration, otherwise parsing and returning the duration provided. 
+func parseTimeout(timeoutStr string) (time.Duration, error) { + // Define the regex pattern to match the timeout string + pattern := "^(([0-9])+[h|m|s])|None$" + regex := regexp.MustCompile(pattern) + + if timeoutStr == "None" || timeoutStr == "" { + // If the timeout is "None" or empty, return the maximum duration + return maxTimeDuration, nil + } + + // Check if the timeout string matches the pattern + if !regex.MatchString(timeoutStr) { + return maxTimeDuration, fmt.Errorf("invalid timeout format") + } + + return time.ParseDuration(timeoutStr) +} + +func decisionGroupsToGroupKeys(decisionsGroup []clusterv1alpha1.MandatoryDecisionGroup) []clusterv1beta1sdk.GroupKey { + var result []clusterv1beta1sdk.GroupKey + for _, d := range decisionsGroup { + gk := clusterv1beta1sdk.GroupKey{} + // GroupName is considered first to select the decisionGroups then GroupIndex. + if d.GroupName != "" { + gk.GroupName = d.GroupName + } else { + gk.GroupIndex = d.GroupIndex + } + result = append(result, gk) + } + return result +} + +func minRecheckAfter(rolloutClusters []ClusterRolloutStatus, minSuccessTime time.Duration) *time.Duration { + var minRecheckAfter *time.Duration + for _, r := range rolloutClusters { + if r.TimeOutTime != nil { + timeOut := r.TimeOutTime.Sub(RolloutClock.Now()) + if minRecheckAfter == nil || *minRecheckAfter > timeOut { + minRecheckAfter = &timeOut + } + } + } + if minSuccessTime != 0 && (minRecheckAfter == nil || minSuccessTime < *minRecheckAfter) { + minRecheckAfter = &minSuccessTime + } + + return minRecheckAfter +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1/placement.go b/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1/placement.go new file mode 100644 index 00000000..4db8d198 --- /dev/null +++ b/vendor/open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1/placement.go @@ -0,0 +1,273 @@ +package v1beta1 + +import ( + "fmt" + "sort" + "strconv" + "sync" + + "k8s.io/apimachinery/pkg/labels" + 
"k8s.io/apimachinery/pkg/util/sets" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +type PlacementDecisionGetter interface { + List(selector labels.Selector, namespace string) (ret []*clusterv1beta1.PlacementDecision, err error) +} + +type PlacementDecisionClustersTracker struct { + placement *clusterv1beta1.Placement + placementDecisionGetter PlacementDecisionGetter + existingScheduledClusterGroups ClusterGroupsMap + clusterGroupsIndexToName map[int32]string + clusterGroupsNameToIndex map[string][]int32 + lock sync.RWMutex +} + +type GroupKey struct { + GroupName string `json:"groupName,omitempty"` + GroupIndex int32 `json:"groupIndex,omitempty"` +} + +// NewPlacementDecisionClustersTracker initializes a PlacementDecisionClustersTracker +// using existing clusters. Clusters are added to the default cluster group with index 0. +// Set existingScheduledClusters to nil if there are no existing clusters. +func NewPlacementDecisionClustersTracker(placement *clusterv1beta1.Placement, pdl PlacementDecisionGetter, existingScheduledClusters sets.Set[string]) *PlacementDecisionClustersTracker { + pdct := &PlacementDecisionClustersTracker{ + placement: placement, + placementDecisionGetter: pdl, + existingScheduledClusterGroups: ClusterGroupsMap{{GroupIndex: 0}: existingScheduledClusters}, + } + + // Generate group name indices for the tracker. + pdct.generateGroupsNameIndex() + return pdct +} + +// NewPlacementDecisionClustersTrackerWithGroups initializes a PlacementDecisionClustersTracker +// using existing cluster groups. Set existingScheduledClusterGroups to nil if no groups exist. 
+func NewPlacementDecisionClustersTrackerWithGroups(placement *clusterv1beta1.Placement, pdl PlacementDecisionGetter, existingScheduledClusterGroups ClusterGroupsMap) *PlacementDecisionClustersTracker { + pdct := &PlacementDecisionClustersTracker{ + placement: placement, + placementDecisionGetter: pdl, + existingScheduledClusterGroups: existingScheduledClusterGroups, + } + + // Generate group name indices for the tracker. + pdct.generateGroupsNameIndex() + return pdct +} + +// Refresh refreshes the tracker's decisionClusters. +func (pdct *PlacementDecisionClustersTracker) Refresh() error { + pdct.lock.Lock() + defer pdct.lock.Unlock() + + if pdct.placement == nil || pdct.placementDecisionGetter == nil { + return nil + } + + // Get the generated PlacementDecisions + decisionSelector := labels.SelectorFromSet(labels.Set{ + clusterv1beta1.PlacementLabel: pdct.placement.Name, + }) + decisions, err := pdct.placementDecisionGetter.List(decisionSelector, pdct.placement.Namespace) + if err != nil { + return fmt.Errorf("failed to list PlacementDecisions: %w", err) + } + + // Get the decision cluster names and groups + newScheduledClusterGroups := map[GroupKey]sets.Set[string]{} + for _, d := range decisions { + groupKey, err := parseGroupKeyFromDecision(d) + if err != nil { + return err + } + + if _, exist := newScheduledClusterGroups[groupKey]; !exist { + newScheduledClusterGroups[groupKey] = sets.New[string]() + } + + for _, sd := range d.Status.Decisions { + newScheduledClusterGroups[groupKey].Insert(sd.ClusterName) + } + } + + // Update the existing decision cluster groups + pdct.existingScheduledClusterGroups = newScheduledClusterGroups + pdct.generateGroupsNameIndex() + + return nil +} + +// GetClusterChanges updates the tracker's decisionClusters and returns added and deleted cluster names. 
+func (pdct *PlacementDecisionClustersTracker) GetClusterChanges() (sets.Set[string], sets.Set[string], error) { + // Get existing clusters + existingScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters() + + // Refresh clusters + err := pdct.Refresh() + if err != nil { + return nil, nil, err + } + newScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters() + + // Compare the difference + added := newScheduledClusters.Difference(existingScheduledClusters) + deleted := existingScheduledClusters.Difference(newScheduledClusters) + + return added, deleted, nil +} + +func (pdct *PlacementDecisionClustersTracker) generateGroupsNameIndex() { + pdct.clusterGroupsIndexToName = map[int32]string{} + pdct.clusterGroupsNameToIndex = map[string][]int32{} + + for groupkey := range pdct.existingScheduledClusterGroups { + // index to name + pdct.clusterGroupsIndexToName[groupkey.GroupIndex] = groupkey.GroupName + // name to index + if index, exist := pdct.clusterGroupsNameToIndex[groupkey.GroupName]; exist { + pdct.clusterGroupsNameToIndex[groupkey.GroupName] = append(index, groupkey.GroupIndex) + } else { + pdct.clusterGroupsNameToIndex[groupkey.GroupName] = []int32{groupkey.GroupIndex} + } + } + + // sort index order + for _, index := range pdct.clusterGroupsNameToIndex { + sort.Slice(index, func(i, j int) bool { + return index[i] < index[j] + }) + } +} + +// ExistingClusterGroups returns the tracker's existing decision cluster groups for groups listed in groupKeys. +// Return empty set when groupKeys is empty. 
+func (pdct *PlacementDecisionClustersTracker) ExistingClusterGroups(groupKeys ...GroupKey) ClusterGroupsMap { + pdct.lock.RLock() + defer pdct.lock.RUnlock() + + resultClusterGroups := make(map[GroupKey]sets.Set[string]) + + includeGroupKeys := pdct.fulfillGroupKeys(groupKeys) + for _, groupKey := range includeGroupKeys { + if clusters, found := pdct.existingScheduledClusterGroups[groupKey]; found { + resultClusterGroups[groupKey] = clusters + } + } + + return resultClusterGroups +} + +// ExistingClusterGroupsBesides returns the tracker's existing decision cluster groups except cluster groups listed in groupKeys. +// Return all the clusters when groupKeys is empty. +func (pdct *PlacementDecisionClustersTracker) ExistingClusterGroupsBesides(groupKeys ...GroupKey) ClusterGroupsMap { + pdct.lock.RLock() + defer pdct.lock.RUnlock() + + resultClusterGroups := make(map[GroupKey]sets.Set[string]) + + excludeGroupKeys := pdct.fulfillGroupKeys(groupKeys) + includeGroupKeys := pdct.getGroupKeysBesides(excludeGroupKeys) + for _, groupKey := range includeGroupKeys { + if clusters, found := pdct.existingScheduledClusterGroups[groupKey]; found { + resultClusterGroups[groupKey] = clusters + } + } + + return resultClusterGroups +} + +// Fulfill the expect groupkeys with group name or group index, the returned groupkeys are ordered by input group name then group index. 
+// For example, the input is []GroupKey{{GroupName: "group1"}, {GroupIndex: 2}}, +// the returned is []GroupKey{{GroupName: "group1", GroupIndex: 0}, {GroupName: "group1", GroupIndex: 1}, {GroupName: "group2", GroupIndex: 2}} +func (pdct *PlacementDecisionClustersTracker) fulfillGroupKeys(groupKeys []GroupKey) []GroupKey { + fulfilledGroupKeys := []GroupKey{} + for _, gk := range groupKeys { + if gk.GroupName != "" { + if indexes, exist := pdct.clusterGroupsNameToIndex[gk.GroupName]; exist { + for _, groupIndex := range indexes { + fulfilledGroupKeys = append(fulfilledGroupKeys, GroupKey{GroupName: gk.GroupName, GroupIndex: groupIndex}) + } + } + } else { + if groupName, exist := pdct.clusterGroupsIndexToName[gk.GroupIndex]; exist { + fulfilledGroupKeys = append(fulfilledGroupKeys, GroupKey{GroupName: groupName, GroupIndex: gk.GroupIndex}) + } + } + } + return fulfilledGroupKeys +} + +func (pdct *PlacementDecisionClustersTracker) getGroupKeysBesides(groupKeyToExclude []GroupKey) []GroupKey { + groupKey := []GroupKey{} + for i := 0; i < len(pdct.clusterGroupsIndexToName); i++ { + gKey := GroupKey{GroupName: pdct.clusterGroupsIndexToName[int32(i)], GroupIndex: int32(i)} + if !containsGroupKey(groupKeyToExclude, gKey) { + groupKey = append(groupKey, gKey) + } + } + + return groupKey +} + +// ClusterGroupsMap is a custom type representing a map of group keys to sets of cluster names. +type ClusterGroupsMap map[GroupKey]sets.Set[string] + +// GetOrderedGroupKeys returns an ordered slice of GroupKeys, sorted by group index. +func (g ClusterGroupsMap) GetOrderedGroupKeys() []GroupKey { + groupKeys := []GroupKey{} + for groupKey := range g { + groupKeys = append(groupKeys, groupKey) + } + + // sort by group index + sort.Slice(groupKeys, func(i, j int) bool { + return groupKeys[i].GroupIndex < groupKeys[j].GroupIndex + }) + + return groupKeys +} + +// GetClusters returns a set containing all clusters from all group sets. 
+func (g ClusterGroupsMap) GetClusters() sets.Set[string] { + clusterSet := sets.New[string]() + for _, clusterGroup := range g { + clusterSet = clusterSet.Union(clusterGroup) + } + return clusterSet +} + +// ClusterToGroupKey returns a mapping of cluster names to their respective group keys. +func (g ClusterGroupsMap) ClusterToGroupKey() map[string]GroupKey { + clusterToGroupKey := map[string]GroupKey{} + + for groupKey, clusterGroup := range g { + for c := range clusterGroup { + clusterToGroupKey[c] = groupKey + } + } + + return clusterToGroupKey +} + +// Helper function to check if a groupKey is present in the groupKeys slice. +func containsGroupKey(groupKeys []GroupKey, groupKey GroupKey) bool { + for _, gk := range groupKeys { + if gk == groupKey { + return true + } + } + return false +} + +func parseGroupKeyFromDecision(d *clusterv1beta1.PlacementDecision) (GroupKey, error) { + groupName := d.Labels[clusterv1beta1.DecisionGroupNameLabel] + groupIndex := d.Labels[clusterv1beta1.DecisionGroupIndexLabel] + groupIndexNum, err := strconv.Atoi(groupIndex) + if err != nil { + return GroupKey{}, fmt.Errorf("incorrect group index: %w", err) + } + return GroupKey{GroupName: groupName, GroupIndex: int32(groupIndexNum)}, nil +} diff --git a/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier/workapplier.go b/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier/workapplier.go index 10332be8..e3c74956 100644 --- a/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier/workapplier.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier/workapplier.go @@ -128,7 +128,7 @@ func (w *WorkApplier) Delete(ctx context.Context, namespace, name string) error } func shouldUpdateMap(required, existing map[string]string) bool { - if len(required) > len(existing) { + if len(required) != len(existing) { return true } for key, value := range required { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go 
b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index 1cecf88e..e7f2945f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "net/http" + "sort" "time" "golang.org/x/exp/maps" @@ -421,7 +422,12 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { for namespace, cfg := range opts.DefaultNamespaces { cfg = defaultConfig(cfg, optionDefaultsToConfig(&opts)) if namespace == metav1.NamespaceAll { - cfg.FieldSelector = fields.AndSelectors(appendIfNotNil(namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), cfg.FieldSelector)...) + cfg.FieldSelector = fields.AndSelectors( + appendIfNotNil( + namespaceAllSelector(maps.Keys(opts.DefaultNamespaces)), + cfg.FieldSelector, + )..., + ) } opts.DefaultNamespaces[namespace] = cfg } @@ -435,7 +441,12 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { return opts, fmt.Errorf("type %T is not namespaced, but its ByObject.Namespaces setting is not nil", obj) } - // Default the namespace-level configs first, because they need to use the undefaulted type-level config. + if isNamespaced && byObject.Namespaces == nil { + byObject.Namespaces = maps.Clone(opts.DefaultNamespaces) + } + + // Default the namespace-level configs first, because they need to use the undefaulted type-level config + // to be able to potentially fall through to settings from DefaultNamespaces. for namespace, config := range byObject.Namespaces { // 1. 
Default from the undefaulted type-level config config = defaultConfig(config, byObjectToConfig(byObject)) @@ -461,14 +472,14 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { byObject.Namespaces[namespace] = config } - defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) - byObject.Label = defaultedConfig.LabelSelector - byObject.Field = defaultedConfig.FieldSelector - byObject.Transform = defaultedConfig.Transform - byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy - - if isNamespaced && byObject.Namespaces == nil { - byObject.Namespaces = opts.DefaultNamespaces + // Only default ByObject iself if it isn't namespaced or has no namespaces configured, as only + // then any of this will be honored. + if !isNamespaced || len(byObject.Namespaces) == 0 { + defaultedConfig := defaultConfig(byObjectToConfig(byObject), optionDefaultsToConfig(&opts)) + byObject.Label = defaultedConfig.LabelSelector + byObject.Field = defaultedConfig.FieldSelector + byObject.Transform = defaultedConfig.Transform + byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy } opts.ByObject[obj] = byObject @@ -498,20 +509,21 @@ func defaultConfig(toDefault, defaultFrom Config) Config { return toDefault } -func namespaceAllSelector(namespaces []string) fields.Selector { +func namespaceAllSelector(namespaces []string) []fields.Selector { selectors := make([]fields.Selector, 0, len(namespaces)-1) + sort.Strings(namespaces) for _, namespace := range namespaces { if namespace != metav1.NamespaceAll { selectors = append(selectors, fields.OneTermNotEqualSelector("metadata.namespace", namespace)) } } - return fields.AndSelectors(selectors...) 
+ return selectors } -func appendIfNotNil[T comparable](a, b T) []T { +func appendIfNotNil[T comparable](a []T, b T) []T { if b != *new(T) { - return []T{a, b} + return append(a, b) } - return []T{a} + return a } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index 5af02063..927be22b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -53,7 +53,7 @@ func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTM // client for discovery information to do REST mappings. type mapper struct { mapper meta.RESTMapper - client *discovery.DiscoveryClient + client discovery.DiscoveryInterface knownGroups map[string]*restmapper.APIGroupResources apiGroups map[string]*metav1.APIGroup @@ -182,23 +182,28 @@ func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) er Group: metav1.APIGroup{Name: groupName}, VersionedResources: make(map[string][]metav1.APIResource), } - if _, ok := m.knownGroups[groupName]; ok { - groupResources = m.knownGroups[groupName] - } // Update information for group resources about versioned resources. // The number of API calls is equal to the number of versions: /apis//. - groupVersionResources, err := m.fetchGroupVersionResources(groupName, versions...) + // If we encounter a missing API version (NotFound error), we will remove the group from + // the m.apiGroups and m.knownGroups caches. + // If this happens, in the next call the group will be added back to apiGroups + // and only the existing versions will be loaded in knownGroups. + groupVersionResources, err := m.fetchGroupVersionResourcesLocked(groupName, versions...) 
if err != nil { return fmt.Errorf("failed to get API group resources: %w", err) } - for version, resources := range groupVersionResources { - groupResources.VersionedResources[version.Version] = resources.APIResources + + if _, ok := m.knownGroups[groupName]; ok { + groupResources = m.knownGroups[groupName] } // Update information for group resources about the API group by adding new versions. // Ignore the versions that are already registered. - for _, version := range versions { + for groupVersion, resources := range groupVersionResources { + version := groupVersion.Version + + groupResources.VersionedResources[version] = resources.APIResources found := false for _, v := range groupResources.Group.Versions { if v.Version == version { @@ -265,8 +270,9 @@ func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) return m.apiGroups[groupName], nil } -// fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +// fetchGroupVersionResourcesLocked fetches the resources for the specified group and its versions. +// This method might modify the cache so it needs to be called under the lock. 
+func (m *mapper) fetchGroupVersionResourcesLocked(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -274,9 +280,20 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string groupVersion := schema.GroupVersion{Group: groupName, Version: version} apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String()) - if err != nil && !apierrors.IsNotFound(err) { + if apierrors.IsNotFound(err) { + // If the version is not found, we remove the group from the cache + // so it gets refreshed on the next call. + if m.isAPIGroupCached(groupVersion) { + delete(m.apiGroups, groupName) + } + if m.isGroupVersionCached(groupVersion) { + delete(m.knownGroups, groupName) + } + continue + } else if err != nil { failedGroups[groupVersion] = err } + if apiResourceList != nil { // even in case of error, some fallback might have been returned. groupVersionResources[groupVersion] = apiResourceList @@ -290,3 +307,29 @@ func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string return groupVersionResources, nil } + +// isGroupVersionCached checks if a version for a group is cached in the known groups cache. +func (m *mapper) isGroupVersionCached(gv schema.GroupVersion) bool { + if cachedGroup, ok := m.knownGroups[gv.Group]; ok { + _, cached := cachedGroup.VersionedResources[gv.Version] + return cached + } + + return false +} + +// isAPIGroupCached checks if a version for a group is cached in the api groups cache. 
+func (m *mapper) isAPIGroupCached(gv schema.GroupVersion) bool { + cachedGroup, ok := m.apiGroups[gv.Group] + if !ok { + return false + } + + for _, version := range cachedGroup.Versions { + if version.Version == gv.Version { + return true + } + } + + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index a16f354a..fdb9d982 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -518,6 +518,8 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e // Stop all the leader election runnables, which includes reconcilers. cm.logger.Info("Stopping and waiting for leader election runnables") + // Prevent leader election when shutting down a non-elected manager + cm.runnables.LeaderElection.startOnce.Do(func() {}) cm.runnables.LeaderElection.StopAndWait(cm.shutdownCtx) // Stop the caches before the leader election runnables, this is an important diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index 96566f5d..60609104 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -263,6 +263,15 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { r.start.Unlock() } + // Recheck if we're stopped and hold the readlock, given that the stop and start can be called + // at the same time, we can end up in a situation where the runnable is added + // after the group is stopped and the channel is closed. + r.stop.RLock() + defer r.stop.RUnlock() + if r.stopped { + return errRunnableGroupStopped + } + // Enqueue the runnable. 
r.ch <- readyRunnable return nil @@ -272,7 +281,11 @@ func (r *runnableGroup) Add(rn Runnable, ready runnableCheck) error { func (r *runnableGroup) StopAndWait(ctx context.Context) { r.stopOnce.Do(func() { // Close the reconciler channel once we're done. - defer close(r.ch) + defer func() { + r.stop.Lock() + close(r.ch) + r.stop.Unlock() + }() _ = r.Start(ctx) r.stop.Lock()