diff --git a/cmd/agent/agent.go b/cmd/agent/agent.go index 361058165..0c0090993 100644 --- a/cmd/agent/agent.go +++ b/cmd/agent/agent.go @@ -203,7 +203,8 @@ func startManager(o *options.AgentOptions, ctx context.Context) { o.EnableNodeCapacity) go resourceCollector.Start(ctx) - leaseUpdater := lease.NewLeaseUpdater(managementClusterKubeClient, AddonName, componentNamespace). + leaseUpdater := lease.NewLeaseUpdater(managementClusterKubeClient, AddonName, + componentNamespace, lease.CheckManagedClusterHealthFunc(managedClusterKubeClient.Discovery())). WithHubLeaseConfig(hubConfig, o.ClusterName) go leaseUpdater.Start(ctx) diff --git a/go.mod b/go.mod index 2a29ce452..b79bbbd0a 100644 --- a/go.mod +++ b/go.mod @@ -71,8 +71,8 @@ require ( k8s.io/klog/v2 v2.90.1 k8s.io/kube-aggregator v0.26.2 k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 - open-cluster-management.io/addon-framework v0.6.1 - open-cluster-management.io/api v0.10.1-0.20230404062739-ddf72e2f1bea + open-cluster-management.io/addon-framework v0.6.2-0.20230518083220-0994d42b557a + open-cluster-management.io/api v0.10.1-0.20230426130439-54a83a1650d6 sigs.k8s.io/controller-runtime v0.14.4 sigs.k8s.io/yaml v1.3.0 ) diff --git a/go.sum b/go.sum index 20a45ce20..8367dffa3 100644 --- a/go.sum +++ b/go.sum @@ -883,10 +883,10 @@ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY= k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -open-cluster-management.io/addon-framework v0.6.1 h1:gnBZaCRgtiPRjCBJoaRqMivajng/XOKp0NQhJUqLd+U= -open-cluster-management.io/addon-framework v0.6.1/go.mod h1:Uu4XC3Ec0ATS7U73PJtzAP4NCDfbDBVy1k5RUUwQDqY= -open-cluster-management.io/api v0.10.1-0.20230404062739-ddf72e2f1bea 
h1:E3BAlNqJohbxgkNkDguu95ET6nWd560D8HIl9eTIxAM= -open-cluster-management.io/api v0.10.1-0.20230404062739-ddf72e2f1bea/go.mod h1:WgKUCJ7+Bf40DsOmH1Gdkpyj3joco+QLzrlM6Ak39zE= +open-cluster-management.io/addon-framework v0.6.2-0.20230518083220-0994d42b557a h1:k2OCZ7tb2WirOjH3MOLf96U7I2r94E974msa2BCkWLk= +open-cluster-management.io/addon-framework v0.6.2-0.20230518083220-0994d42b557a/go.mod h1:kynUPV27PK84SKk5Kw91DJ93ldzLXV1r1MeRxH2x7Ls= +open-cluster-management.io/api v0.10.1-0.20230426130439-54a83a1650d6 h1:sViInPTDkS/jZS7SAmgZvgxP4EO+iC+uHKHPw1vTiqw= +open-cluster-management.io/api v0.10.1-0.20230426130439-54a83a1650d6/go.mod h1:WgKUCJ7+Bf40DsOmH1Gdkpyj3joco+QLzrlM6Ak39zE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/modules.txt b/vendor/modules.txt index 5367dde64..79db3fc43 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1460,24 +1460,26 @@ k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/strings/slices k8s.io/utils/trace -# open-cluster-management.io/addon-framework v0.6.1 +# open-cluster-management.io/addon-framework v0.6.2-0.20230518083220-0994d42b557a ## explicit; go 1.19 open-cluster-management.io/addon-framework/pkg/addonfactory open-cluster-management.io/addon-framework/pkg/addonmanager open-cluster-management.io/addon-framework/pkg/addonmanager/constants open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig -open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonhealthcheck open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate 
-open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/clustermanagement +open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration open-cluster-management.io/addon-framework/pkg/agent open-cluster-management.io/addon-framework/pkg/assets open-cluster-management.io/addon-framework/pkg/basecontroller/factory +open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/lease +open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration +open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.10.1-0.20230404062739-ddf72e2f1bea +# open-cluster-management.io/api v0.10.1-0.20230426130439-54a83a1650d6 ## explicit; go 1.19 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go index 50cb99662..c8588ca49 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/addonfactory.go @@ -10,9 +10,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" ) const AddonDefaultInstallNamespace = "open-cluster-management-agent-addon" diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go 
b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go index d1f1369db..b3d3f2abe 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go @@ -14,10 +14,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" ) // helmBuiltinValues includes the built-in values for helm agentAddon. diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go index a2102cd6b..5cc864c04 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/template_agentaddon.go @@ -6,11 +6,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/assets" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - clusterv1 "open-cluster-management.io/api/cluster/v1" ) // templateBuiltinValues includes the built-in values for template agentAddon. 
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml index f1ab6f272..55e70e1b8 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/crds/test_crd.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -13,10 +12,17 @@ spec: scope: Cluster preserveUnknownFields: false versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.addOnMeta.displayName + name: DISPLAY NAME + type: string + - jsonPath: .spec.addOnConfiguration.crdName + name: CRD NAME + type: string + name: v1alpha1 schema: openAPIV3Schema: - description: ClusterClaim represents cluster information that a managed cluster claims ClusterClaims with well known names include, 1. id.k8s.io, it contains a unique identifier for the cluster. 2. clusterset.k8s.io, it contains an identifier that relates the cluster to the ClusterSet in which it belongs. ClusterClaims created on a managed cluster will be collected and saved into the status of the corresponding ManagedCluster on hub. + description: ClusterManagementAddOn represents the registration of an add-on to the cluster manager. This resource allows the user to discover which add-on is available for the cluster manager and also provides metadata information about the add-on. This resource also provides a linkage to ManagedClusterAddOn, the name of the ClusterManagementAddOn resource will be used for the namespace-scoped ManagedClusterAddOn resource. ClusterManagementAddOn is a cluster-scoped resource. type: object properties: apiVersion: @@ -28,19 +34,162 @@ spec: metadata: type: object spec: - description: Spec defines the attributes of the ClusterClaim. 
+ description: spec represents a desired configuration for the agent on the cluster management add-on. type: object properties: - value: - description: Value is a claim-dependent string - type: string - maxLength: 1024 - minLength: 1 + addOnConfiguration: + description: 'Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.' + type: object + properties: + crName: + description: crName is the name of the CR used to configure instances of the managed add-on. This field should be configured if add-on CR have a consistent name across the all of the ManagedCluster instaces. + type: string + crdName: + description: crdName is the name of the CRD used to configure instances of the managed add-on. This field should be configured if the add-on have a CRD that controls the configuration of the add-on. + type: string + lastObservedGeneration: + description: lastObservedGeneration is the observed generation of the custom resource for the configuration of the addon. + type: integer + format: int64 + addOnMeta: + description: addOnMeta is a reference to the metadata information for the add-on. + type: object + properties: + description: + description: description represents the detailed description of the add-on. + type: string + displayName: + description: displayName represents the name of add-on that will be displayed. + type: string + installStrategy: + description: InstallStrategy represents that related ManagedClusterAddOns should be installed on certain clusters. + type: object + default: + type: Manual + properties: + placements: + description: Placements is a list of placement references honored when install strategy type is Placements. 
All clusters selected by these placements will install the addon If one cluster belongs to multiple placements, it will only apply the strategy defined later in the order. That is to say, The latter strategy overrides the previous one. + type: array + items: + type: object + required: + - name + - namespace + properties: + configs: + description: Configs is the configuration of managedClusterAddon during installation. User can override the configuration by updating the managedClusterAddon directly. + type: array + items: + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. + type: string + default: "" + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + name: + description: Name is the name of the placement + type: string + minLength: 1 + namespace: + description: Namespace is the namespace of the placement + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + type: + description: 'Type is the type of the install strategy, it can be: - Manual: no automatic install - Placements: install to clusters selected by placements.' + type: string + default: Manual + enum: + - Manual + - Placements + supportedConfigs: + description: supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. The default is an empty list + type: array + items: + description: ConfigMeta represents a collection of metadata information for add-on configuration. 
+ type: object + required: + - resource + properties: + defaultConfig: + description: defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration. + type: object + required: + - name + properties: + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + group: + description: group of the add-on configuration. + type: string + default: "" + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - group + - resource + x-kubernetes-list-type: map + status: + description: status represents the current status of cluster management add-on. + type: object + allOf: + - id: "abc" + items: + - schema: + description: test + oneOf: + - id: "abc" + patternProperties: + abc: + description: test + dependencies: + abc: + property: + - "abc" + definitions: + abc: + description: test + anyOf: + - id: "abc" + additionalProperties: + schema: + description: test + additionalItems: + schema: + description: test + not: + description: test served: true storage: true + subresources: + status: {} status: acceptedNames: kind: "" plural: "" - conditions: [ ] - storedVersions: [ ] + conditions: [] + storedVersions: [] diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml index bd2ec8905..465ceea67 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/testmanifests/chart/templates/clusterclaim_crd.yaml @@ -15,10 +15,17 @@ 
spec: scope: Cluster preserveUnknownFields: false versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .spec.addOnMeta.displayName + name: DISPLAY NAME + type: string + - jsonPath: .spec.addOnConfiguration.crdName + name: CRD NAME + type: string + name: v1alpha1 schema: openAPIV3Schema: - description: ClusterClaim represents cluster information that a managed cluster claims ClusterClaims with well known names include, 1. id.k8s.io, it contains a unique identifier for the cluster. 2. clusterset.k8s.io, it contains an identifier that relates the cluster to the ClusterSet in which it belongs. ClusterClaims created on a managed cluster will be collected and saved into the status of the corresponding ManagedCluster on hub. + description: ClusterManagementAddOn represents the registration of an add-on to the cluster manager. This resource allows the user to discover which add-on is available for the cluster manager and also provides metadata information about the add-on. This resource also provides a linkage to ManagedClusterAddOn, the name of the ClusterManagementAddOn resource will be used for the namespace-scoped ManagedClusterAddOn resource. ClusterManagementAddOn is a cluster-scoped resource. type: object properties: apiVersion: @@ -30,22 +37,166 @@ spec: metadata: type: object spec: - description: Spec defines the attributes of the ClusterClaim. + description: spec represents a desired configuration for the agent on the cluster management add-on. type: object properties: - value: - description: Value is a claim-dependent string - type: string - maxLength: 1024 - minLength: 1 + addOnConfiguration: + description: 'Deprecated: Use supportedConfigs filed instead addOnConfiguration is a reference to configuration information for the add-on. In scenario where a multiple add-ons share the same add-on CRD, multiple ClusterManagementAddOn resources need to be created and reference the same AddOnConfiguration.' 
+ type: object + properties: + crName: + description: crName is the name of the CR used to configure instances of the managed add-on. This field should be configured if add-on CR have a consistent name across the all of the ManagedCluster instaces. + type: string + crdName: + description: crdName is the name of the CRD used to configure instances of the managed add-on. This field should be configured if the add-on have a CRD that controls the configuration of the add-on. + type: string + lastObservedGeneration: + description: lastObservedGeneration is the observed generation of the custom resource for the configuration of the addon. + type: integer + format: int64 + addOnMeta: + description: addOnMeta is a reference to the metadata information for the add-on. + type: object + properties: + description: + description: description represents the detailed description of the add-on. + type: string + displayName: + description: displayName represents the name of add-on that will be displayed. + type: string + installStrategy: + description: InstallStrategy represents that related ManagedClusterAddOns should be installed on certain clusters. + type: object + default: + type: Manual + properties: + placements: + description: Placements is a list of placement references honored when install strategy type is Placements. All clusters selected by these placements will install the addon If one cluster belongs to multiple placements, it will only apply the strategy defined later in the order. That is to say, The latter strategy overrides the previous one. + type: array + items: + type: object + required: + - name + - namespace + properties: + configs: + description: Configs is the configuration of managedClusterAddon during installation. User can override the configuration by updating the managedClusterAddon directly. + type: array + items: + type: object + required: + - name + - resource + properties: + group: + description: group of the add-on configuration. 
+ type: string + default: "" + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + resource: + description: resource of the add-on configuration. + type: string + minLength: 1 + name: + description: Name is the name of the placement + type: string + minLength: 1 + namespace: + description: Namespace is the namespace of the placement + type: string + minLength: 1 + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + type: + description: 'Type is the type of the install strategy, it can be: - Manual: no automatic install - Placements: install to clusters selected by placements.' + type: string + default: Manual + enum: + - Manual + - Placements + supportedConfigs: + description: supportedConfigs is a list of configuration types supported by add-on. An empty list means the add-on does not require configurations. The default is an empty list + type: array + items: + description: ConfigMeta represents a collection of metadata information for add-on configuration. + type: object + required: + - resource + properties: + defaultConfig: + description: defaultConfig represents the namespace and name of the default add-on configuration. In scenario where all add-ons have a same configuration. + type: object + required: + - name + properties: + name: + description: name of the add-on configuration. + type: string + minLength: 1 + namespace: + description: namespace of the add-on configuration. If this field is not set, the configuration is in the cluster scope. + type: string + group: + description: group of the add-on configuration. + type: string + default: "" + resource: + description: resource of the add-on configuration. 
+ type: string + minLength: 1 + x-kubernetes-list-map-keys: + - group + - resource + x-kubernetes-list-type: map + status: + description: status represents the current status of cluster management add-on. + type: object + allOf: + - id: "abc" + items: + - schema: + description: test + oneOf: + - id: "abc" + patternProperties: + abc: + description: test + dependencies: + abc: + property: + - "abc" + definitions: + abc: + description: test + anyOf: + - id: "abc" + additionalProperties: + schema: + description: test + additionalItems: + schema: + description: test + not: + description: test served: true storage: true + subresources: + status: {} status: acceptedNames: kind: "" plural: "" - conditions: [ ] - storedVersions: [ ] + conditions: [] + storedVersions: [] + {{ else }} --- diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go index 2e643d852..20ff065c5 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/trimcrds.go @@ -28,7 +28,9 @@ func trimCRDDescription(objects []runtime.Object) []runtime.Object { func trimCRDv1Description(crd *apiextensionsv1.CustomResourceDefinition) { versions := crd.Spec.Versions for i := range versions { - removeDescriptionV1(versions[i].Schema.OpenAPIV3Schema) + if versions[i].Schema != nil { + removeDescriptionV1(versions[i].Schema.OpenAPIV3Schema) + } } } @@ -70,7 +72,8 @@ func removeDescriptionV1(p *apiextensionsv1.JSONSchemaProps) { if len(p.Properties) != 0 { newProperties := map[string]apiextensionsv1.JSONSchemaProps{} - for k, v := range p.Properties { + for k := range p.Properties { + v := p.Properties[k] removeDescriptionV1(&v) newProperties[k] = v } @@ -79,7 +82,8 @@ func removeDescriptionV1(p *apiextensionsv1.JSONSchemaProps) { if len(p.PatternProperties) != 0 { newProperties := 
map[string]apiextensionsv1.JSONSchemaProps{} - for k, v := range p.PatternProperties { + for k := range p.PatternProperties { + v := p.PatternProperties[k] removeDescriptionV1(&v) newProperties[k] = v } @@ -92,7 +96,8 @@ func removeDescriptionV1(p *apiextensionsv1.JSONSchemaProps) { if len(p.Dependencies) != 0 { newDependencies := map[string]apiextensionsv1.JSONSchemaPropsOrStringArray{} - for k, v := range p.Dependencies { + for k := range p.Dependencies { + v := p.Dependencies[k] removeDescriptionV1(v.Schema) newDependencies[k] = v } @@ -105,7 +110,8 @@ func removeDescriptionV1(p *apiextensionsv1.JSONSchemaProps) { if len(p.Definitions) != 0 { newDefinitions := map[string]apiextensionsv1.JSONSchemaProps{} - for k, v := range p.Definitions { + for k := range p.Definitions { + v := p.Definitions[k] removeDescriptionV1(&v) newDefinitions[k] = v } @@ -121,7 +127,9 @@ func removeDescriptionV1(p *apiextensionsv1.JSONSchemaProps) { func trimCRDv1beta1Description(crd *apiextensionsv1beta1.CustomResourceDefinition) { versions := crd.Spec.Versions for i := range versions { - removeDescriptionV1beta1(versions[i].Schema.OpenAPIV3Schema) + if versions[i].Schema != nil { + removeDescriptionV1beta1(versions[i].Schema.OpenAPIV3Schema) + } } } @@ -163,7 +171,8 @@ func removeDescriptionV1beta1(p *apiextensionsv1beta1.JSONSchemaProps) { if len(p.Properties) != 0 { newProperties := map[string]apiextensionsv1beta1.JSONSchemaProps{} - for k, v := range p.Properties { + for k := range p.Properties { + v := p.Properties[k] removeDescriptionV1beta1(&v) newProperties[k] = v } @@ -172,7 +181,8 @@ func removeDescriptionV1beta1(p *apiextensionsv1beta1.JSONSchemaProps) { if len(p.PatternProperties) != 0 { newProperties := map[string]apiextensionsv1beta1.JSONSchemaProps{} - for k, v := range p.PatternProperties { + for k := range p.PatternProperties { + v := p.PatternProperties[k] removeDescriptionV1beta1(&v) newProperties[k] = v } @@ -198,7 +208,8 @@ func removeDescriptionV1beta1(p 
*apiextensionsv1beta1.JSONSchemaProps) { if len(p.Definitions) != 0 { newDefinitions := map[string]apiextensionsv1beta1.JSONSchemaProps{} - for k, v := range p.Definitions { + for k := range p.Definitions { + v := p.Definitions[k] removeDescriptionV1beta1(&v) newDefinitions[k] = v } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go index afbdd09ff..56e6f0169 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/constants/constants.go @@ -2,59 +2,16 @@ package constants import ( "fmt" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" ) const ( - // PreDeleteHookFinalizer is the finalizer for an addon which has deployed hook objects - PreDeleteHookFinalizer = "cluster.open-cluster-management.io/addon-pre-delete" - - // HostingPreDeleteHookFinalizer is the finalizer for an addon which has deployed hook objects on hosting cluster - HostingPreDeleteHookFinalizer = "cluster.open-cluster-management.io/hosting-addon-pre-delete" - - // AddonManifestApplied is a condition type representing whether the manifest of an addon - // is applied correctly. - AddonManifestApplied = "ManifestApplied" - - // AddonManifestAppliedReasonWorkApplyFailed is the reason of condition AddonManifestApplied indicating - // the failure of apply manifestwork of the manifests - AddonManifestAppliedReasonWorkApplyFailed = "ManifestWorkApplyFailed" - - // AddonManifestAppliedReasonManifestsApplied is the reason of condition AddonManifestApplied indicating - // the manifests is applied on the managedcluster. 
- AddonManifestAppliedReasonManifestsApplied = "AddonManifestApplied" - - // AddonManifestAppliedReasonManifestsApplyFailed is the reason of condition AddonManifestApplied indicating - // the failure to apply manifests on the managedcluster - AddonManifestAppliedReasonManifestsApplyFailed = "AddonManifestAppliedFailed" - - // AddonHookManifestCompleted is a condition type representing whether the addon hook is completed. - AddonHookManifestCompleted = "HookManifestCompleted" - // InstallModeBuiltinValueKey is the key of the build in value to represent the addon install mode, addon developers // can use this built in value in manifests. InstallModeBuiltinValueKey = "InstallMode" InstallModeHosted = "Hosted" InstallModeDefault = "Default" - - // HostingManifestFinalizer is the finalizer for an addon which has deployed manifests on the external - // hosting cluster in Hosted mode - HostingManifestFinalizer = "cluster.open-cluster-management.io/hosting-manifests-cleanup" - - // AddonHostingManifestApplied is a condition type representing whether the manifest of an addon - // is applied on the hosting cluster correctly. 
- AddonHostingManifestApplied = "HostingManifestApplied" - - // HostingClusterValid is a condition type representing whether the hosting cluster is valid in Hosted mode - HostingClusterValidity = "HostingClusterValidity" - - // HostingClusterValidityReasonValid is the reason of condition HostingClusterValidity indicating the hosting - // cluster is valid - HostingClusterValidityReasonValid = "HostingClusterValid" - - // HostingClusterValidityReasonInvalid is the reason of condition HostingClusterValidity indicating the hosting - // cluster is invalid - HostingClusterValidityReasonInvalid = "HostingClusterInvalid" ) // DeployWorkNamePrefix returns the prefix of the work name for the addon diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go index e07017cc3..65620b1e5 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go @@ -19,11 +19,13 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) const ( @@ -38,7 +40,7 @@ type addonConfigController struct { addonClient addonv1alpha1client.Interface 
addonLister addonlisterv1alpha1.ManagedClusterAddOnLister addonIndexer cache.Indexer - configListers map[string]dynamiclister.Lister + configListers map[schema.GroupResource]dynamiclister.Lister queue workqueue.RateLimitingInterface } @@ -54,7 +56,7 @@ func NewAddonConfigController( addonClient: addonClient, addonLister: addonInformers.Lister(), addonIndexer: addonInformers.Informer().GetIndexer(), - configListers: map[string]dynamiclister.Lister{}, + configListers: map[schema.GroupResource]dynamiclister.Lister{}, queue: syncCtx.Queue(), } @@ -95,20 +97,20 @@ func (c *addonConfigController) buildConfigInformers( utilruntime.HandleError(err) } configInformers = append(configInformers, indexInformer) - c.configListers[toListerKey(gvr.Group, gvr.Resource)] = dynamiclister.New(indexInformer.GetIndexer(), gvr) + c.configListers[schema.GroupResource{Group: gvr.Group, Resource: gvr.Resource}] = dynamiclister.New(indexInformer.GetIndexer(), gvr) } return configInformers } func (c *addonConfigController) enqueueAddOnsByConfig(gvr schema.GroupVersionResource) enqueueFunc { return func(obj interface{}) { - name, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + namespaceName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { utilruntime.HandleError(fmt.Errorf("error to get accessor of object: %v", obj)) return } - objs, err := c.addonIndexer.ByIndex(byAddOnConfig, fmt.Sprintf("%s/%s/%s", gvr.Group, gvr.Resource, name)) + objs, err := c.addonIndexer.ByIndex(byAddOnConfig, fmt.Sprintf("%s/%s/%s", gvr.Group, gvr.Resource, namespaceName)) if err != nil { utilruntime.HandleError(fmt.Errorf("error to get addons: %v", err)) return @@ -118,9 +120,7 @@ func (c *addonConfigController) enqueueAddOnsByConfig(gvr schema.GroupVersionRes if obj == nil { continue } - - addon := obj.(*addonapiv1alpha1.ManagedClusterAddOn) - key, _ := cache.MetaNamespaceKeyFunc(addon) + key, _ := cache.MetaNamespaceKeyFunc(obj) c.queue.Add(key) } } @@ -132,11 +132,6 @@ func (c 
*addonConfigController) indexByConfig(obj interface{}) ([]string, error) return nil, fmt.Errorf("obj is supposed to be a ManagedClusterAddOn, but is %T", obj) } - if len(addon.Status.ConfigReferences) == 0 { - // no config references, ignore - return nil, nil - } - configNames := []string{} for _, configReference := range addon.Status.ConfigReferences { if configReference.Name == "" { @@ -168,34 +163,30 @@ func (c *addonConfigController) sync(ctx context.Context, syncCtx factory.SyncCo addonCopy := addon.DeepCopy() - if err := c.updateConfigGenerations(addonCopy); err != nil { + if err := c.updateConfigSpecHashAndGenerations(addonCopy); err != nil { return err } return c.patchConfigReferences(ctx, addon, addonCopy) } -func (c *addonConfigController) updateConfigGenerations(addon *addonapiv1alpha1.ManagedClusterAddOn) error { - if len(addon.Status.ConfigReferences) == 0 { - // no config references, ignore - return nil +func (c *addonConfigController) updateConfigSpecHashAndGenerations(addon *addonapiv1alpha1.ManagedClusterAddOn) error { + supportedConfigSet := map[addonapiv1alpha1.ConfigGroupResource]bool{} + for _, config := range addon.Status.SupportedConfigs { + supportedConfigSet[config] = true } - for index, configReference := range addon.Status.ConfigReferences { - lister, ok := c.configListers[toListerKey(configReference.Group, configReference.Resource)] + lister, ok := c.configListers[schema.GroupResource{Group: configReference.ConfigGroupResource.Group, Resource: configReference.ConfigGroupResource.Resource}] if !ok { continue } - namespace := configReference.ConfigReferent.Namespace - name := configReference.ConfigReferent.Name - var config *unstructured.Unstructured var err error - if namespace == "" { - config, err = lister.Get(name) + if configReference.Namespace == "" { + config, err = lister.Get(configReference.Name) } else { - config, err = lister.Namespace(namespace).Get(name) + config, err = 
lister.Namespace(configReference.Namespace).Get(configReference.Name) } if errors.IsNotFound(err) { @@ -206,9 +197,26 @@ func (c *addonConfigController) updateConfigGenerations(addon *addonapiv1alpha1. return err } - // TODO if config is configmap or secret, the generation will not be increased automatically, - // we may need to consider how to handle this in the future + // update LastObservedGeneration for all the configs in status addon.Status.ConfigReferences[index].LastObservedGeneration = config.GetGeneration() + + // update desired spec hash only for the configs in spec + for _, addonconfig := range addon.Spec.Configs { + // do not update spec hash for unsupported configs + if _, ok := supportedConfigSet[addonconfig.ConfigGroupResource]; !ok { + continue + } + if configReference.DesiredConfig == nil { + continue + } + if configReference.ConfigGroupResource == addonconfig.ConfigGroupResource && configReference.DesiredConfig.ConfigReferent == addonconfig.ConfigReferent { + specHash, err := managementaddonconfig.GetSpecHash(config) + if err != nil { + return err + } + addon.Status.ConfigReferences[index].DesiredConfig.SpecHash = specHash + } + } } return nil @@ -265,7 +273,3 @@ func getIndex(config addonapiv1alpha1.ConfigReference) string { return fmt.Sprintf("%s/%s/%s", config.Group, config.Resource, config.Name) } - -func toListerKey(group, resource string) string { - return fmt.Sprintf("%s/%s", group, resource) -} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonhealthcheck/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonhealthcheck/controller.go deleted file mode 100644 index 82f3c89fd..000000000 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonhealthcheck/controller.go +++ /dev/null @@ -1,275 +0,0 @@ -package addonhealthcheck - -import ( - "context" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/api/errors" - 
"k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" - "open-cluster-management.io/addon-framework/pkg/utils" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" - addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" - addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" - workinformers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" - worklister "open-cluster-management.io/api/client/work/listers/work/v1" - workapiv1 "open-cluster-management.io/api/work/v1" -) - -// addonHealthCheckController reconciles instances of ManagedClusterAddon on the hub. -// TODO: consider health check in Hosted mode. 
-type addonHealthCheckController struct { - addonClient addonv1alpha1client.Interface - managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister - workLister worklister.ManifestWorkLister - agentAddons map[string]agent.AgentAddon -} - -func NewAddonHealthCheckController( - addonClient addonv1alpha1client.Interface, - addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, - workInformers workinformers.ManifestWorkInformer, - agentAddons map[string]agent.AgentAddon, -) factory.Controller { - c := &addonHealthCheckController{ - addonClient: addonClient, - managedClusterAddonLister: addonInformers.Lister(), - workLister: workInformers.Lister(), - agentAddons: agentAddons, - } - - return factory.New().WithFilteredEventsInformersQueueKeysFunc( - func(obj runtime.Object) []string { - key, _ := cache.MetaNamespaceKeyFunc(obj) - return []string{key} - }, - func(obj interface{}) bool { - accessor, _ := meta.Accessor(obj) - if _, ok := c.agentAddons[accessor.GetName()]; !ok { - return false - } - return true - }, - addonInformers.Informer()). - WithFilteredEventsInformersQueueKeysFunc( - func(obj runtime.Object) []string { - accessor, _ := meta.Accessor(obj) - return []string{fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey])} - }, - func(obj interface{}) bool { - accessor, _ := meta.Accessor(obj) - if accessor.GetLabels() == nil { - return false - } - - addonName, ok := accessor.GetLabels()[addonapiv1alpha1.AddonLabelKey] - if !ok { - return false - } - - if _, ok := c.agentAddons[addonName]; !ok { - return false - } - if !strings.HasPrefix(accessor.GetName(), constants.DeployWorkNamePrefix(addonName)) { - return false - } - return true - }, - workInformers.Informer(), - ). - WithSync(c.sync). 
- ToController("addon-healthcheck-controller") -} - -func (c *addonHealthCheckController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { - clusterName, addonName, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - // ignore addon whose key is not in format: namespace/name - return nil - } - - klog.V(4).Infof("Reconciling addon health checker on cluster %q", clusterName) - managedClusterAddon, err := c.managedClusterAddonLister.ManagedClusterAddOns(clusterName).Get(addonName) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - - agentAddon := c.agentAddons[addonName] - if agentAddon == nil { - return nil - } - - return c.syncAddonHealthChecker(ctx, managedClusterAddon, agentAddon) -} - -func (c *addonHealthCheckController) syncAddonHealthChecker(ctx context.Context, addon *addonapiv1alpha1.ManagedClusterAddOn, agentAddon agent.AgentAddon) error { - // for in-place edit - addon = addon.DeepCopy() - // reconcile health check mode - var expectedHealthCheckMode addonapiv1alpha1.HealthCheckMode - - if agentAddon.GetAgentAddonOptions().HealthProber == nil { - return nil - } - - switch agentAddon.GetAgentAddonOptions().HealthProber.Type { - case agent.HealthProberTypeWork: - fallthrough - case agent.HealthProberTypeNone: - expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeCustomized - case agent.HealthProberTypeLease: - fallthrough - default: - expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeLease - } - - if expectedHealthCheckMode != addon.Status.HealthCheck.Mode { - addon.Status.HealthCheck.Mode = expectedHealthCheckMode - _, err := c.addonClient.AddonV1alpha1().ManagedClusterAddOns(addon.Namespace). 
- UpdateStatus(ctx, addon, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - - return c.probeAddonStatus(ctx, addon, agentAddon) -} - -func (c *addonHealthCheckController) probeAddonStatus(ctx context.Context, addon *addonapiv1alpha1.ManagedClusterAddOn, agentAddon agent.AgentAddon) error { - addonCopy := addon.DeepCopy() - - if agentAddon.GetAgentAddonOptions().HealthProber == nil { - return nil - } - - if agentAddon.GetAgentAddonOptions().HealthProber.Type != agent.HealthProberTypeWork { - return nil - } - - requirement, _ := labels.NewRequirement(addonapiv1alpha1.AddonLabelKey, selection.Equals, []string{addon.Name}) - selector := labels.NewSelector().Add(*requirement) - - addonWorks, err := c.workLister.ManifestWorks(addon.Namespace).List(selector) - if err != nil || len(addonWorks) == 0 { - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionUnknown, - Reason: "WorkNotFound", - Message: "Work for addon is not found", - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - } - - manifestConditions := []workapiv1.ManifestCondition{} - for _, work := range addonWorks { - if !strings.HasPrefix(work.Name, constants.DeployWorkNamePrefix(addon.Name)) { - continue - } - // Check the overall work available condition at first. 
- workCond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable) - switch { - case workCond == nil: - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionUnknown, - Reason: "WorkNotApplied", - Message: "Work is not applied yet", - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - case workCond.Status == metav1.ConditionFalse: - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionFalse, - Reason: "WorkApplyFailed", - Message: workCond.Message, - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - } - - manifestConditions = append(manifestConditions, work.Status.ResourceStatus.Manifests...) - } - - if agentAddon.GetAgentAddonOptions().HealthProber.WorkProber == nil { - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionTrue, - Reason: "WorkApplied", - Message: "Addon work is applied", - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - } - - probeFields := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.ProbeFields - - for _, field := range probeFields { - result := findResultByIdentifier(field.ResourceIdentifier, manifestConditions) - // if no results are returned. it is possible that work agent has not returned the feedback value. 
- // mark condition to unknown - if result == nil { - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionUnknown, - Reason: "NoProbeResult", - Message: "Probe results are not returned", - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - } - - err := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.HealthCheck(field.ResourceIdentifier, *result) - if err != nil { - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionFalse, - Reason: "ProbeUnavailable", - Message: fmt.Sprintf("Probe addon unavailable with err %v", err), - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - } - } - - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: "Available", - Status: metav1.ConditionTrue, - Reason: "ProbeAvailable", - Message: "Addon is available", - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) -} - -func findResultByIdentifier(identifier workapiv1.ResourceIdentifier, manifestConditions []workapiv1.ManifestCondition) *workapiv1.StatusFeedbackResult { - for _, status := range manifestConditions { - if identifier.Group != status.ResourceMeta.Group { - continue - } - if identifier.Resource != status.ResourceMeta.Resource { - continue - } - if identifier.Name != status.ResourceMeta.Name { - continue - } - if identifier.Namespace != status.ResourceMeta.Namespace { - continue - } - - if len(status.StatusFeedbacks.Values) == 0 { - return nil - } - - return &status.StatusFeedbacks - } - - return nil -} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go index 25f4b0bce..4c57759e7 100644 --- 
a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall/controller.go @@ -10,14 +10,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" errorsutil "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) // managedClusterController reconciles instances of ManagedCluster on the hub. 
diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go index 84eb81b9c..c02986124 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go @@ -2,21 +2,21 @@ package agentdeploy import ( "context" + "encoding/json" "fmt" "strings" + jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" errorsutil "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" - "open-cluster-management.io/addon-framework/pkg/utils" + "k8s.io/klog/v2" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" @@ -29,6 +29,10 @@ import ( "open-cluster-management.io/api/utils/work/v1/workapplier" "open-cluster-management.io/api/utils/work/v1/workbuilder" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) // addonDeployController deploy addon agent resources on the managed cluster. 
@@ -168,6 +172,11 @@ func (c *addonDeployController) sync(ctx context.Context, syncCtx factory.SyncCo return err } + // to deploy agents if there is RegistrationApplied condition. + if meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied) == nil { + return nil + } + cluster, err := c.managedClusterLister.Get(clusterName) if errors.IsNotFound(err) { // the managedCluster is nil in this case,and sync cannot handle nil managedCluster. @@ -204,6 +213,10 @@ func (c *addonDeployController) sync(ctx context.Context, syncCtx factory.SyncCo getCluster: c.managedClusterLister.Get, getWorkByAddon: c.getWorksByAddonFn(hookByHostedAddon), agentAddon: agentAddon}, + &healthCheckSyncer{ + getWorkByAddon: c.getWorksByAddonFn(byAddon), + agentAddon: agentAddon, + }, } oldAddon := addon @@ -231,7 +244,44 @@ func (c *addonDeployController) updateAddon(ctx context.Context, new, old *addon return err } - return utils.PatchAddonCondition(ctx, c.addonClient, new, old) + if equality.Semantic.DeepEqual(new.Status.HealthCheck, old.Status.HealthCheck) && + equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) { + return nil + } + + oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + HealthCheck: old.Status.HealthCheck, + Conditions: old.Status.Conditions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ + HealthCheck: new.Status.HealthCheck, + Conditions: new.Status.Conditions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching addon %s/%s condition 
with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err } func (c *addonDeployController) applyWork(ctx context.Context, appliedType string, @@ -242,28 +292,33 @@ func (c *addonDeployController) applyWork(ctx context.Context, appliedType strin meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionFalse, - Reason: constants.AddonManifestAppliedReasonWorkApplyFailed, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, Message: fmt.Sprintf("failed to apply manifestWork: %v", err), }) return work, err } // Update addon status based on work's status - if meta.IsStatusConditionTrue(work.Status.Conditions, workapiv1.WorkApplied) { + WorkAppliedCond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkApplied) + switch { + case WorkAppliedCond == nil: + return work, nil + case WorkAppliedCond.Status == metav1.ConditionTrue: meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionTrue, - Reason: constants.AddonManifestAppliedReasonManifestsApplied, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplied, Message: "manifests of addon are applied successfully", }) - } else { + default: meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionFalse, - Reason: constants.AddonManifestAppliedReasonManifestsApplyFailed, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonManifestsApplyFailed, Message: "failed to apply the manifests of addon", }) } + return work, nil } @@ -280,10 +335,10 @@ func (c *addonDeployController) buildDeployManifestWorks(installMode, workNamesp switch installMode { case constants.InstallModeHosted: - appliedType = constants.AddonHostingManifestApplied + appliedType = 
addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied addonWorkBuilder = newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) case constants.InstallModeDefault: - appliedType = constants.AddonManifestApplied + appliedType = addonapiv1alpha1.ManagedClusterAddOnManifestApplied addonWorkBuilder = newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) default: return nil, nil, fmt.Errorf("invalid install mode %v", installMode) @@ -294,7 +349,7 @@ func (c *addonDeployController) buildDeployManifestWorks(installMode, workNamesp meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionFalse, - Reason: constants.AddonManifestAppliedReasonWorkApplyFailed, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), }) return nil, nil, err @@ -313,7 +368,7 @@ func (c *addonDeployController) buildDeployManifestWorks(installMode, workNamesp meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionFalse, - Reason: constants.AddonManifestAppliedReasonWorkApplyFailed, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, Message: fmt.Sprintf("failed to build manifestwork: %v", err), }) return nil, nil, err @@ -332,10 +387,10 @@ func (c *addonDeployController) buildHookManifestWork(installMode, workNamespace switch installMode { case constants.InstallModeHosted: - appliedType = constants.AddonHostingManifestApplied + appliedType = addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied addonWorkBuilder = newHostingAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) case constants.InstallModeDefault: - appliedType = constants.AddonManifestApplied + appliedType = addonapiv1alpha1.ManagedClusterAddOnManifestApplied addonWorkBuilder = 
newAddonWorksBuilder(agentAddon.GetAgentAddonOptions().HostedModeEnabled, c.workBuilder) default: return nil, fmt.Errorf("invalid install mode %v", installMode) @@ -346,7 +401,7 @@ func (c *addonDeployController) buildHookManifestWork(installMode, workNamespace meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionFalse, - Reason: constants.AddonManifestAppliedReasonWorkApplyFailed, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, Message: fmt.Sprintf("failed to get manifest from agent interface: %v", err), }) return nil, err @@ -360,7 +415,7 @@ func (c *addonDeployController) buildHookManifestWork(installMode, workNamespace meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ Type: appliedType, Status: metav1.ConditionFalse, - Reason: constants.AddonManifestAppliedReasonWorkApplyFailed, + Reason: addonapiv1alpha1.AddonManifestAppliedReasonWorkApplyFailed, Message: fmt.Sprintf("failed to build manifestwork: %v", err), }) return nil, err diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go index e81fe01c7..bcca12205 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_hook_sync.go @@ -6,12 +6,13 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 
"open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) type defaultHookSyncer struct { @@ -34,11 +35,11 @@ func (s *defaultHookSyncer) sync(ctx context.Context, } if hookWork == nil { - addonRemoveFinalizer(addon, constants.PreDeleteHookFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) return addon, nil } - if addonAddFinalizer(addon, constants.PreDeleteHookFinalizer) { + if addonAddFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) { return addon, nil } @@ -47,7 +48,7 @@ func (s *defaultHookSyncer) sync(ctx context.Context, } // will deploy the pre-delete hook manifestWork when the addon is deleting - hookWork, err = s.applyWork(ctx, constants.AddonManifestApplied, hookWork, addon) + hookWork, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnManifestApplied, hookWork, addon) if err != nil { return addon, err } @@ -55,18 +56,18 @@ func (s *defaultHookSyncer) sync(ctx context.Context, // TODO: will surface more message here if hookWorkIsCompleted(hookWork) { meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: constants.AddonHookManifestCompleted, + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, Status: metav1.ConditionTrue, Reason: "HookManifestIsCompleted", Message: fmt.Sprintf("hook manifestWork %v is completed.", hookWork.Name), }) - addonRemoveFinalizer(addon, constants.PreDeleteHookFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonPreDeleteHookFinalizer) return addon, nil } meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: constants.AddonHookManifestCompleted, + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, Status: metav1.ConditionFalse, Reason: "HookManifestIsNotCompleted", Message: fmt.Sprintf("hook manifestWork 
%v is not completed.", hookWork.Name), diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go index 9ce83c74a..5ffa6bd4d 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/default_sync.go @@ -4,12 +4,13 @@ import ( "context" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) type defaultSyncer struct { @@ -62,7 +63,7 @@ func (s *defaultSyncer) sync(ctx context.Context, } for _, deployWork := range deployWorks { - _, err = s.applyWork(ctx, constants.AddonManifestApplied, deployWork, addon) + _, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnManifestApplied, deployWork, addon) if err != nil { errs = append(errs, err) } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go new file mode 100644 index 000000000..97ffe032f --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/healthcheck_sync.go @@ -0,0 +1,172 @@ 
+package agentdeploy + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +type healthCheckSyncer struct { + getWorkByAddon func(addonName, addonNamespace string) ([]*workapiv1.ManifestWork, error) + agentAddon agent.AgentAddon +} + +func (s *healthCheckSyncer) sync(ctx context.Context, + syncCtx factory.SyncContext, + cluster *clusterv1.ManagedCluster, + addon *addonapiv1alpha1.ManagedClusterAddOn) (*addonapiv1alpha1.ManagedClusterAddOn, error) { + // reconcile health check mode + var expectedHealthCheckMode addonapiv1alpha1.HealthCheckMode + + if s.agentAddon.GetAgentAddonOptions().HealthProber == nil { + return addon, nil + } + + switch s.agentAddon.GetAgentAddonOptions().HealthProber.Type { + case agent.HealthProberTypeWork, agent.HealthProberTypeNone: + expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeCustomized + case agent.HealthProberTypeLease: + expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeLease + default: + expectedHealthCheckMode = addonapiv1alpha1.HealthCheckModeLease + } + + if expectedHealthCheckMode != addon.Status.HealthCheck.Mode { + addon.Status.HealthCheck.Mode = expectedHealthCheckMode + } + + err := s.probeAddonStatus(addon) + return addon, err +} + +func (s *healthCheckSyncer) probeAddonStatus(addon *addonapiv1alpha1.ManagedClusterAddOn) error { + if s.agentAddon.GetAgentAddonOptions().HealthProber.Type != agent.HealthProberTypeWork { + return nil + } + + if s.agentAddon.GetAgentAddonOptions().HealthProber.WorkProber == nil { + 
meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkApply, + Message: "Addon manifestWork is applied", + }) + return nil + } + + // update Available condition after addon manifestWorks are applied + if meta.FindStatusCondition(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnManifestApplied) == nil { + return nil + } + + addonWorks, err := s.getWorkByAddon(addon.Name, addon.Namespace) + if err != nil || len(addonWorks) == 0 { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionUnknown, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkNotFound, + Message: "Addon manifestWork is not found", + }) + return err + } + + manifestConditions := []workapiv1.ManifestCondition{} + for _, work := range addonWorks { + if !strings.HasPrefix(work.Name, constants.DeployWorkNamePrefix(addon.Name)) { + continue + } + // Check the overall work available condition at first. + workCond := meta.FindStatusCondition(work.Status.Conditions, workapiv1.WorkAvailable) + switch { + case workCond == nil: + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionUnknown, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkNotApply, + Message: "Addon manifestWork is not applied yet", + }) + return nil + case workCond.Status == metav1.ConditionFalse: + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonWorkNotApply, + Message: workCond.Message, + }) + return nil + } + + manifestConditions = append(manifestConditions, work.Status.ResourceStatus.Manifests...) 
+ } + + probeFields := s.agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.ProbeFields + + for _, field := range probeFields { + result := findResultByIdentifier(field.ResourceIdentifier, manifestConditions) + // if no results are returned. it is possible that work agent has not returned the feedback value. + // mark condition to unknown + if result == nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionUnknown, + Reason: addonapiv1alpha1.AddonAvailableReasonNoProbeResult, + Message: "Probe results are not returned", + }) + return nil + } + + err := s.agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.HealthCheck(field.ResourceIdentifier, *result) + if err != nil { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeUnavailable, + Message: fmt.Sprintf("Probe addon unavailable with err %v", err), + }) + return nil + } + } + + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnConditionAvailable, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.AddonAvailableReasonProbeAvailable, + Message: "Addon is available", + }) + return nil +} + +func findResultByIdentifier(identifier workapiv1.ResourceIdentifier, manifestConditions []workapiv1.ManifestCondition) *workapiv1.StatusFeedbackResult { + for _, status := range manifestConditions { + if identifier.Group != status.ResourceMeta.Group { + continue + } + if identifier.Resource != status.ResourceMeta.Resource { + continue + } + if identifier.Name != status.ResourceMeta.Name { + continue + } + if identifier.Namespace != status.ResourceMeta.Namespace { + continue + } + + if len(status.StatusFeedbacks.Values) == 0 { + return nil + } + + return 
&status.StatusFeedbacks + } + + return nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go index 7caac209b..9f628166b 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go @@ -8,12 +8,13 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) type hostedHookSyncer struct { @@ -55,7 +56,7 @@ func (s *hostedHookSyncer) sync(ctx context.Context, return addon, err } - addonRemoveFinalizer(addon, constants.HostingPreDeleteHookFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) return addon, nil } if err != nil { @@ -66,7 +67,7 @@ func (s *hostedHookSyncer) sync(ctx context.Context, if err = s.cleanupHookWork(ctx, addon); err != nil { return addon, err } - addonRemoveFinalizer(addon, constants.HostingPreDeleteHookFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) return addon, nil } hookWork, err := s.buildWorks(constants.InstallModeHosted, 
hostingClusterName, cluster, addon) @@ -75,22 +76,22 @@ func (s *hostedHookSyncer) sync(ctx context.Context, } if hookWork == nil { - addonRemoveFinalizer(addon, constants.HostingPreDeleteHookFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) return addon, nil } // will deploy the pre-delete hook manifestWork when the addon is deleting if addon.DeletionTimestamp.IsZero() { - addonAddFinalizer(addon, constants.HostingPreDeleteHookFinalizer) + addonAddFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) return addon, nil } // the hook work is completed if there is no HostingPreDeleteHookFinalizer when the addon is deleting. - if !addonHasFinalizer(addon, constants.HostingPreDeleteHookFinalizer) { + if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { return addon, nil } - hookWork, err = s.applyWork(ctx, constants.AddonHostingManifestApplied, hookWork, addon) + hookWork, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, hookWork, addon) if err != nil { return addon, err } @@ -98,7 +99,7 @@ func (s *hostedHookSyncer) sync(ctx context.Context, // TODO: will surface more message here if hookWorkIsCompleted(hookWork) { meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: constants.AddonHookManifestCompleted, + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, Status: metav1.ConditionTrue, Reason: "HookManifestIsCompleted", Message: fmt.Sprintf("hook manifestWork %v is completed.", hookWork.Name), @@ -107,14 +108,14 @@ func (s *hostedHookSyncer) sync(ctx context.Context, if err = s.cleanupHookWork(ctx, addon); err != nil { return addon, err } - if addonRemoveFinalizer(addon, constants.HostingPreDeleteHookFinalizer) { + if addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { return addon, err } return addon, nil } meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - 
Type: constants.AddonHookManifestCompleted, + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, Status: metav1.ConditionFalse, Reason: "HookManifestIsNotCompleted", Message: fmt.Sprintf("hook manifestWork %v is not completed.", hookWork.Name), @@ -128,7 +129,7 @@ func (s *hostedHookSyncer) sync(ctx context.Context, // if the hostingClusterName is empty, will try to find out the hosting cluster by manifestWork labels and do the cleanup func (s *hostedHookSyncer) cleanupHookWork(ctx context.Context, addon *addonapiv1alpha1.ManagedClusterAddOn) (err error) { - if !addonHasFinalizer(addon, constants.HostingPreDeleteHookFinalizer) { + if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { return nil } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go index 7cedb09e0..4da04d225 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_sync.go @@ -8,12 +8,13 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) type hostedSyncer 
struct { @@ -47,7 +48,7 @@ func (s *hostedSyncer) sync(ctx context.Context, if err := s.cleanupDeployWork(ctx, addon); err != nil { return addon, err } - addonRemoveFinalizer(addon, constants.HostingManifestFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) return addon, nil } @@ -60,22 +61,22 @@ func (s *hostedSyncer) sync(ctx context.Context, } meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: constants.HostingClusterValidity, + Type: addonapiv1alpha1.ManagedClusterAddOnHostingClusterValidity, Status: metav1.ConditionFalse, - Reason: constants.HostingClusterValidityReasonInvalid, + Reason: addonapiv1alpha1.HostingClusterValidityReasonInvalid, Message: fmt.Sprintf("hosting cluster %s is not a managed cluster of the hub", hostingClusterName), }) - addonRemoveFinalizer(addon, constants.HostingManifestFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) return addon, nil } if err != nil { return addon, err } meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: constants.HostingClusterValidity, + Type: addonapiv1alpha1.ManagedClusterAddOnHostingClusterValidity, Status: metav1.ConditionTrue, - Reason: constants.HostingClusterValidityReasonValid, + Reason: addonapiv1alpha1.HostingClusterValidityReasonValid, Message: fmt.Sprintf("hosting cluster %s is a managed cluster of the hub", hostingClusterName), }) @@ -83,24 +84,24 @@ func (s *hostedSyncer) sync(ctx context.Context, if err = s.cleanupDeployWork(ctx, addon); err != nil { return addon, err } - addonRemoveFinalizer(addon, constants.HostingManifestFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) return addon, nil } if !addon.DeletionTimestamp.IsZero() { // clean up the deploy work until the hook work is completed - if addonHasFinalizer(addon, constants.HostingPreDeleteHookFinalizer) { + if addonHasFinalizer(addon, 
addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { return addon, nil } if err = s.cleanupDeployWork(ctx, addon); err != nil { return addon, err } - addonRemoveFinalizer(addon, constants.HostingManifestFinalizer) + addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) return addon, nil } - if addonAddFinalizer(addon, constants.HostingManifestFinalizer) { + if addonAddFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) { return addon, nil } @@ -129,7 +130,7 @@ func (s *hostedSyncer) sync(ctx context.Context, } for _, deployWork := range deployWorks { - _, err = s.applyWork(ctx, constants.AddonHostingManifestApplied, deployWork, addon) + _, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, deployWork, addon) if err != nil { errs = append(errs, err) } @@ -142,7 +143,7 @@ func (s *hostedSyncer) sync(ctx context.Context, // to find out the hosting cluster by manifestWork labels and do the cleanup. func (s *hostedSyncer) cleanupDeployWork(ctx context.Context, addon *addonapiv1alpha1.ManagedClusterAddOn) (err error) { - if !addonHasFinalizer(addon, constants.HostingManifestFinalizer) { + if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingManifestFinalizer) { return nil } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/indexes.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/indexes.go index 90f52eaa2..de8a5834e 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/indexes.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/indexes.go @@ -2,11 +2,12 @@ package agentdeploy import ( "fmt" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" "strings" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + addonapiv1alpha1 
"open-cluster-management.io/api/addon/v1alpha1" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" ) const ( diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go index 1ad54d5c2..b550cae6e 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go @@ -1,6 +1,7 @@ package agentdeploy import ( + "encoding/json" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -8,11 +9,12 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" - "open-cluster-management.io/addon-framework/pkg/agent" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" "open-cluster-management.io/api/utils/work/v1/workbuilder" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" + "open-cluster-management.io/addon-framework/pkg/agent" ) func addonHasFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer string) bool { @@ -27,9 +29,16 @@ func addonHasFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer st func addonRemoveFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer string) bool { var rst []string for _, f := range addon.Finalizers { - if f != finalizer { - rst = append(rst, f) + if f == finalizer { + continue + } + // remove deperecated finalizers also + if f == addonapiv1alpha1.AddonDeprecatedHostingManifestFinalizer || + f == addonapiv1alpha1.AddonDeprecatedPreDeleteHookFinalizer || + f == addonapiv1alpha1.AddonDeprecatedHostingPreDeleteHookFinalizer { + 
continue } + rst = append(rst, f) } if len(rst) != len(addon.Finalizers) { addon.SetFinalizers(rst) @@ -39,12 +48,22 @@ func addonRemoveFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer } func addonAddFinalizer(addon *addonapiv1alpha1.ManagedClusterAddOn, finalizer string) bool { - rst := addon.Finalizers - if rst == nil { + if addon.Finalizers == nil { addon.SetFinalizers([]string{finalizer}) return true } + var rst []string + for _, f := range addon.Finalizers { + // remove deprecated finalizers also + if f == addonapiv1alpha1.AddonDeprecatedHostingManifestFinalizer || + f == addonapiv1alpha1.AddonDeprecatedPreDeleteHookFinalizer || + f == addonapiv1alpha1.AddonDeprecatedHostingPreDeleteHookFinalizer { + continue + } + rst = append(rst, f) + } + for _, f := range addon.Finalizers { if f == finalizer { return false @@ -292,10 +311,16 @@ func (b *addonWorksBuilder) BuildDeployWorks(addonWorkNamespace string, } } + annotations, err := configsToAnnotations(addon.Status.ConfigReferences) + if err != nil { + return nil, nil, err + } + return b.workBuilder.Build(deployObjects, newAddonWorkObjectMeta(b.processor.manifestWorkNamePrefix(addon.Namespace, addon.Name), addon.Name, addon.Namespace, addonWorkNamespace, owner), workbuilder.ExistingManifestWorksOption(existingWorks), workbuilder.ManifestConfigOption(manifestOptions), + workbuilder.ManifestAnnotations(annotations), workbuilder.DeletionOption(deletionOption)) } @@ -424,7 +449,8 @@ func hookWorkIsCompleted(hookWork *workapiv1.ManifestWork) bool { return true } -func newAddonWorkObjectMeta(namePrefix, addonName, addonNamespace, workNamespace string, owner *metav1.OwnerReference) workbuilder.GenerateManifestWorkObjectMeta { +func newAddonWorkObjectMeta(namePrefix, addonName, addonNamespace, workNamespace string, + owner *metav1.OwnerReference) workbuilder.GenerateManifestWorkObjectMeta { return func(index int) metav1.ObjectMeta { objectMeta := metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", namePrefix, 
index), @@ -448,26 +474,29 @@ func newAddonWorkObjectMeta(namePrefix, addonName, addonNamespace, workNamespace } func getManifestConfigOption(agentAddon agent.AgentAddon) []workapiv1.ManifestConfigOption { - if agentAddon.GetAgentAddonOptions().HealthProber == nil { - return nil - } + manifestConfigs := []workapiv1.ManifestConfigOption{} - if agentAddon.GetAgentAddonOptions().HealthProber.Type != agent.HealthProberTypeWork { - return nil + if agentAddon.GetAgentAddonOptions().HealthProber != nil && + agentAddon.GetAgentAddonOptions().HealthProber.Type == agent.HealthProberTypeWork && + agentAddon.GetAgentAddonOptions().HealthProber.WorkProber != nil { + probeRules := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.ProbeFields + for _, rule := range probeRules { + manifestConfigs = append(manifestConfigs, workapiv1.ManifestConfigOption{ + ResourceIdentifier: rule.ResourceIdentifier, + FeedbackRules: rule.ProbeRules, + }) + } } - if agentAddon.GetAgentAddonOptions().HealthProber.WorkProber == nil { - return nil + if updaters := agentAddon.GetAgentAddonOptions().Updaters; updaters != nil { + for _, updater := range updaters { + manifestConfigs = append(manifestConfigs, workapiv1.ManifestConfigOption{ + ResourceIdentifier: updater.ResourceIdentifier, + UpdateStrategy: &updater.UpdateStrategy, + }) + } } - manifestConfigs := []workapiv1.ManifestConfigOption{} - probeRules := agentAddon.GetAgentAddonOptions().HealthProber.WorkProber.ProbeFields - for _, rule := range probeRules { - manifestConfigs = append(manifestConfigs, workapiv1.ManifestConfigOption{ - ResourceIdentifier: rule.ResourceIdentifier, - FeedbackRules: rule.ProbeRules, - }) - } return manifestConfigs } @@ -492,3 +521,50 @@ func getDeletionOrphaningRule(obj runtime.Object) (*workapiv1.OrphaningRule, err } return rule, nil } + +// convert config reference to annotations. 
+func configsToAnnotations(configReference []addonapiv1alpha1.ConfigReference) (map[string]string, error) { + if len(configReference) == 0 { + return nil, nil + } + + // converts the configReference into a map, key is config name, value is spec hash. + specHashMap := ConfigsToMap(configReference) + + // converts the map into a JSON byte string. + jsonBytes, err := json.Marshal(specHashMap) + if err != nil { + return nil, err + } + + // return a map with key as "open-cluster-management.io/config-spec-hash" and value is the JSON byte string. + // For example: + // open-cluster-management.io/config-spec-hash: '{"addonhubconfigs.addon.open-cluster-management.io//default":"613d134a2ec072a8a6451af913979f496d657ef5", + // "addondeploymentconfigs.addon.open-cluster-management.io/open-cluster-management/default":"cca7df9188fb920dcfab374940452393e2037619"}' + return map[string]string{ + workapiv1.ManifestConfigSpecHashAnnotationKey: string(jsonBytes), + }, nil +} + +// ConfigsToMap returns a map that stores the config name as the key and the config spec hash as the value. +func ConfigsToMap(configReference []addonapiv1alpha1.ConfigReference) map[string]string { + // config name follows the format of .//, for example, + // addondeploymentconfigs.addon.open-cluster-management.io/open-cluster-management/default. + // for a cluster scoped resource, the namespace would be empty, for example, + // addonhubconfigs.addon.open-cluster-management.io//default. 
+ specHashMap := make(map[string]string, len(configReference)) + for _, v := range configReference { + if v.DesiredConfig == nil { + continue + } + resourceStr := v.Resource + if len(v.Group) > 0 { + resourceStr += fmt.Sprintf(".%s", v.Group) + } + resourceStr += fmt.Sprintf("/%s/%s", v.DesiredConfig.Namespace, v.DesiredConfig.Name) + + specHashMap[resourceStr] = v.DesiredConfig.SpecHash + } + + return specHashMap +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go index e3c93de02..214eb905f 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrapprove.go @@ -19,14 +19,15 @@ import ( v1beta1certificateslisters "k8s.io/client-go/listers/certificates/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) var ( @@ -345,7 +346,8 @@ func unsafeCovertV1beta1KeyUsageToV1KeyUsage(usages []certificatesv1beta1.KeyUsa } // TODO: remove the following block for deprecating V1beta1 CSR 
compatibility -func unsafeCovertV1beta1ExtraValueToV1ExtraValue(extraValues map[string]certificatesv1beta1.ExtraValue) map[string]certificatesv1.ExtraValue { +func unsafeCovertV1beta1ExtraValueToV1ExtraValue( + extraValues map[string]certificatesv1beta1.ExtraValue) map[string]certificatesv1.ExtraValue { v1Values := make(map[string]certificatesv1.ExtraValue) for k := range extraValues { v1Values[k] = certificatesv1.ExtraValue(extraValues[k]) @@ -354,7 +356,9 @@ func unsafeCovertV1beta1ExtraValueToV1ExtraValue(extraValues map[string]certific } // TODO: remove the following block for deprecating V1beta1 CSR compatibility -func unsafeCovertV1beta1ConditionsToV1Conditions(conditions []certificatesv1beta1.CertificateSigningRequestCondition) []certificatesv1.CertificateSigningRequestCondition { +func unsafeCovertV1beta1ConditionsToV1Conditions( + conditions []certificatesv1beta1.CertificateSigningRequestCondition, +) []certificatesv1.CertificateSigningRequestCondition { v1Conditions := make([]certificatesv1.CertificateSigningRequestCondition, len(conditions)) for i := range conditions { v1Conditions[i] = certificatesv1.CertificateSigningRequestCondition{ diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go index b138082b9..6c0a3d766 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate/csrsign.go @@ -3,8 +3,6 @@ package certificate import ( "context" "fmt" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - clusterv1 "open-cluster-management.io/api/cluster/v1" "strings" certificatesv1 "k8s.io/api/certificates/v1" @@ -16,12 +14,15 @@ import ( "k8s.io/client-go/kubernetes" certificateslisters "k8s.io/client-go/listers/certificates/v1" 
"k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) // csrApprovingController auto approve the renewal CertificateSigningRequests for an accepted spoke cluster on the hub. diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/clustermanagement/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/clustermanagement/controller.go deleted file mode 100644 index ab208ffc1..000000000 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/clustermanagement/controller.go +++ /dev/null @@ -1,368 +0,0 @@ -package clustermanagement - -import ( - "context" - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" - "open-cluster-management.io/addon-framework/pkg/utils" - addonapiv1alpha1 
"open-cluster-management.io/api/addon/v1alpha1" - addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" - addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" - addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" - clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" - clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" -) - -const UnsupportedConfigurationType = "UnsupportedConfiguration" - -// clusterManagementController reconciles instances of managedclusteradd on the hub -// based on the clustermanagementaddon. -type clusterManagementController struct { - addonClient addonv1alpha1client.Interface - managedClusterLister clusterlister.ManagedClusterLister - managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister - clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister - agentAddons map[string]agent.AgentAddon -} - -func NewClusterManagementController( - addonClient addonv1alpha1client.Interface, - clusterInformers clusterinformers.ManagedClusterInformer, - addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, - clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, - agentAddons map[string]agent.AgentAddon, -) factory.Controller { - c := &clusterManagementController{ - addonClient: addonClient, - managedClusterLister: clusterInformers.Lister(), - managedClusterAddonLister: addonInformers.Lister(), - clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), - agentAddons: agentAddons, - } - - return factory.New().WithFilteredEventsInformersQueueKeysFunc( - func(obj runtime.Object) []string { - key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - return []string{key} - }, - func(obj interface{}) bool { - accessor, _ := meta.Accessor(obj) - if _, ok := 
c.agentAddons[accessor.GetName()]; !ok { - return false - } - - return true - }, - addonInformers.Informer(), clusterManagementAddonInformers.Informer()). - WithSync(c.sync).ToController("cluster-management-addon-controller") -} - -func (c *clusterManagementController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { - klog.V(4).Infof("Reconciling addon %q", key) - - namespace, addonName, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - // ignore addon whose key is invalid - return nil - } - - clusterManagementAddon, err := c.clusterManagementAddonLister.Get(addonName) - switch { - case errors.IsNotFound(err): - return nil - case err != nil: - return err - } - - if len(namespace) == 0 { - return c.syncAllAddon(syncCtx, addonName) - } - - addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(namespace).Get(addonName) - switch { - case errors.IsNotFound(err): - return nil - case err != nil: - return err - } - - addonCopy := addon.DeepCopy() - - // Add owner if it does not exist - owner := metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) - modified := utils.MergeOwnerRefs(&addonCopy.OwnerReferences, *owner, false) - if modified { - _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Update(ctx, addonCopy, metav1.UpdateOptions{}) - return err - } - - // Add related ClusterManagementAddon - utils.MergeRelatedObjects(&modified, &addonCopy.Status.RelatedObjects, addonapiv1alpha1.ObjectReference{ - Name: clusterManagementAddon.Name, - Resource: "clustermanagementaddons", - Group: addonapiv1alpha1.GroupVersion.Group, - }) - - // Add config references - if err := mergeConfigReferences(&modified, c.agentAddons[addonName], clusterManagementAddon, addonCopy); err != nil { - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: UnsupportedConfigurationType, - Status: metav1.ConditionTrue, - Reason: "ConfigurationUnsupported", 
- Message: err.Error(), - }) - return utils.PatchAddonCondition(ctx, c.addonClient, addonCopy, addon) - } - - // Update unsupported configuration condition if configuration becomes corrected - unsupportedConfigCondition := meta.FindStatusCondition(addon.Status.Conditions, UnsupportedConfigurationType) - if unsupportedConfigCondition != nil { - meta.SetStatusCondition(&addonCopy.Status.Conditions, metav1.Condition{ - Type: UnsupportedConfigurationType, - Status: metav1.ConditionFalse, - Reason: "ConfigurationSupported", - Message: "the config resources are supported", - }) - modified = true - } - - if !modified { - return nil - } - - _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).UpdateStatus(ctx, addonCopy, metav1.UpdateOptions{}) - return err -} - -func (c *clusterManagementController) syncAllAddon(syncCtx factory.SyncContext, addonName string) error { - clusters, err := c.managedClusterLister.List(labels.Everything()) - if err != nil { - return err - } - - for _, cluster := range clusters { - addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(cluster.Name).Get(addonName) - switch { - case errors.IsNotFound(err): - continue - case err != nil: - return err - } - - key, _ := cache.MetaNamespaceKeyFunc(addon) - syncCtx.Queue().Add(key) - } - - return nil -} - -func mergeConfigReferences( - modified *bool, - agent agent.AgentAddon, - clusterManagementAddon *addonapiv1alpha1.ClusterManagementAddOn, - addon *addonapiv1alpha1.ManagedClusterAddOn, -) error { - // make sure the supported configs in ClusterManagementAddon are registered and no duplicated - cmaConfigSet, err := validateCMAConfigs(clusterManagementAddon.Spec.SupportedConfigs, agent.GetAgentAddonOptions().SupportedConfigGVRs) - if err != nil { - return err - } - - if len(cmaConfigSet) == 0 { - if len(addon.Spec.Configs) != 0 { - return fmt.Errorf("the supported config resources are required in ClusterManagementAddon") - } - - // the supported configs are not specified and no 
config refers in the managed cluster addon - // for compatibility, try to merge old addon configuration - // TODO this will be removed after next few releases - mergeAddOnConfiguration(modified, clusterManagementAddon, addon) - return nil - } - - // merge the ClusterManagementAddOn default configs and ManagedClusterAddOn configs - // TODO After merged there may be multiple configs with the same group and resource in the config reference list, - // currently, we save all of them, in the future, we may consider a way to define which config should be used - expectedConfigReferences, err := mergeConfigs(cmaConfigSet, addon.Spec.Configs) - if err != nil { - return err - } - - if len(expectedConfigReferences) == 0 { - // the config references are not defined, ignore - return nil - } - - // we should ignore the last observed generation when we compare two config references - actualConfigReferences := []addonapiv1alpha1.ConfigReference{} - for _, config := range addon.Status.ConfigReferences { - actualConfigReferences = append(actualConfigReferences, addonapiv1alpha1.ConfigReference{ - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: config.Group, - Resource: config.Resource, - }, - ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Name: config.Name, - Namespace: config.Namespace, - }, - }) - } - - if !equality.Semantic.DeepEqual(actualConfigReferences, expectedConfigReferences) { - addon.Status.ConfigReferences = expectedConfigReferences - *modified = true - } - - return nil -} - -// for compatibility, ignore the deprecation warnings -func mergeAddOnConfiguration( - modified *bool, - clusterManagementAddon *addonapiv1alpha1.ClusterManagementAddOn, - addon *addonapiv1alpha1.ManagedClusterAddOn, -) { - expectedCoordinate := addonapiv1alpha1.ConfigCoordinates{ - //nolint:staticcheck - //lint:ignore SA1019 Ignore the deprecation warnings - CRDName: clusterManagementAddon.Spec.AddOnConfiguration.CRDName, - //nolint:staticcheck - //lint:ignore SA1019 Ignore 
the deprecation warnings - CRName: clusterManagementAddon.Spec.AddOnConfiguration.CRName, - } - actualCoordinate := addonapiv1alpha1.ConfigCoordinates{ - //nolint:staticcheck - //lint:ignore SA1019 Ignore the deprecation warnings - CRDName: addon.Status.AddOnConfiguration.CRDName, - //nolint:staticcheck - //lint:ignore SA1019 Ignore the deprecation warnings - CRName: addon.Status.AddOnConfiguration.CRName, - } - - if !equality.Semantic.DeepEqual(expectedCoordinate, actualCoordinate) { - //nolint:staticcheck - //lint:ignore SA1019 Ignore the deprecation warnings - addon.Status.AddOnConfiguration.CRDName = expectedCoordinate.CRDName - //nolint:staticcheck - //lint:ignore SA1019 Ignore the deprecation warnings - addon.Status.AddOnConfiguration.CRName = expectedCoordinate.CRName - *modified = true - } -} - -func isRegistedConfig(gvrs []schema.GroupVersionResource, config addonapiv1alpha1.ConfigMeta) bool { - for _, gvr := range gvrs { - if gvr.Group == config.Group && gvr.Resource == config.Resource { - return true - } - } - return false -} - -func listRegistedConfigs(gvrs []schema.GroupVersionResource) string { - keys := make([]string, 0, len(gvrs)) - for _, gvr := range gvrs { - keys = append(keys, gvr.String()) - } - return strings.Join(keys, ";") -} - -func validateCMAConfigs(cmaSupportedConfigs []addonapiv1alpha1.ConfigMeta, - registedConfigs []schema.GroupVersionResource) (map[schema.GroupResource]*addonapiv1alpha1.ConfigReferent, error) { - supportedConfigSet := map[schema.GroupResource]*addonapiv1alpha1.ConfigReferent{} - for _, cmaConfig := range cmaSupportedConfigs { - configGR := schema.GroupResource{ - Group: cmaConfig.Group, - Resource: cmaConfig.Resource, - } - - _, existed := supportedConfigSet[configGR] - if existed { - return nil, fmt.Errorf("the config resource %q is duplicated", configGR.String()) - } - - // the supported config in ClusterManagementAddon should be registed in add-on framework - if !isRegistedConfig(registedConfigs, cmaConfig) { - 
return nil, fmt.Errorf("the config resource %q in ClusterManagementAddon is unregistered, registered configs: %q", - configGR.String(), listRegistedConfigs(registedConfigs)) - } - - supportedConfigSet[configGR] = cmaConfig.DefaultConfig - } - - return supportedConfigSet, nil -} - -func mergeConfigs( - cmaConfigSet map[schema.GroupResource]*addonapiv1alpha1.ConfigReferent, - mcaConfigs []addonapiv1alpha1.AddOnConfig) ([]addonapiv1alpha1.ConfigReference, error) { - configReferences := []addonapiv1alpha1.ConfigReference{} - mcaConfigGRs := []schema.GroupResource{} - - // using ManagedClusterAddOn configs override the ClusterManagementAddOn default configs - for _, mcaConfig := range mcaConfigs { - configGR := schema.GroupResource{ - Group: mcaConfig.Group, - Resource: mcaConfig.Resource, - } - - _, supported := cmaConfigSet[configGR] - if !supported { - return nil, fmt.Errorf("the config resource %q is unsupported", configGR.String()) - } - - if mcaConfig.Name == "" { - return nil, fmt.Errorf("the config name is required in %q", configGR.String()) - } - - configReferences = append(configReferences, addonapiv1alpha1.ConfigReference{ - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: mcaConfig.Group, - Resource: mcaConfig.Resource, - }, - ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Name: mcaConfig.Name, - Namespace: mcaConfig.Namespace, - }, - }) - - mcaConfigGRs = append(mcaConfigGRs, configGR) - } - - // remove the ClusterManagementAddOn default configs from ManagedClusterAddOn configs - for _, configGR := range mcaConfigGRs { - delete(cmaConfigSet, configGR) - } - - // add the default configs from ClusterManagementAddOn - for groupResource, defautlConifg := range cmaConfigSet { - if defautlConifg == nil { - continue - } - - configReferences = append(configReferences, addonapiv1alpha1.ConfigReference{ - ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{ - Group: groupResource.Group, - Resource: groupResource.Resource, - }, - 
ConfigReferent: addonapiv1alpha1.ConfigReferent{ - Name: defautlConifg.Name, - Namespace: defautlConifg.Namespace, - }, - }) - } - - return configReferences, nil -} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go new file mode 100644 index 000000000..584aa5112 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go @@ -0,0 +1,308 @@ +package managementaddonconfig + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/dynamic/dynamiclister" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" +) + +const ( + controllerName = "management-addon-config-controller" + byClusterManagementAddOnConfig = "by-cluster-management-addon-config" +) + +type enqueueFunc func(obj interface{}) + +// clusterManagementAddonConfigController reconciles all 
interested addon config types (GroupVersionResource) on the hub. +type clusterManagementAddonConfigController struct { + addonClient addonv1alpha1client.Interface + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + clusterManagementAddonIndexer cache.Indexer + configListers map[schema.GroupResource]dynamiclister.Lister + queue workqueue.RateLimitingInterface +} + +func NewManagementAddonConfigController( + addonClient addonv1alpha1client.Interface, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + configInformerFactory dynamicinformer.DynamicSharedInformerFactory, + configGVRs map[schema.GroupVersionResource]bool, +) factory.Controller { + syncCtx := factory.NewSyncContext(controllerName) + + c := &clusterManagementAddonConfigController{ + addonClient: addonClient, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + clusterManagementAddonIndexer: clusterManagementAddonInformers.Informer().GetIndexer(), + configListers: map[schema.GroupResource]dynamiclister.Lister{}, + queue: syncCtx.Queue(), + } + + configInformers := c.buildConfigInformers(configInformerFactory, configGVRs) + + if err := clusterManagementAddonInformers.Informer().AddIndexers(cache.Indexers{byClusterManagementAddOnConfig: c.indexByConfig}); err != nil { + utilruntime.HandleError(err) + } + + return factory.New(). + WithSyncContext(syncCtx). + WithInformersQueueKeysFunc(func(obj runtime.Object) []string { + key, _ := cache.MetaNamespaceKeyFunc(obj) + return []string{key} + }, clusterManagementAddonInformers.Informer()). + WithBareInformers(configInformers...). 
+ WithSync(c.sync).ToController(controllerName) +} + +func (c *clusterManagementAddonConfigController) buildConfigInformers( + configInformerFactory dynamicinformer.DynamicSharedInformerFactory, + configGVRs map[schema.GroupVersionResource]bool, +) []factory.Informer { + configInformers := []factory.Informer{} + for gvr := range configGVRs { + indexInformer := configInformerFactory.ForResource(gvr).Informer() + _, err := indexInformer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueueClusterManagementAddOnsByConfig(gvr), + UpdateFunc: func(oldObj, newObj interface{}) { + c.enqueueClusterManagementAddOnsByConfig(gvr)(newObj) + }, + DeleteFunc: c.enqueueClusterManagementAddOnsByConfig(gvr), + }, + ) + if err != nil { + utilruntime.HandleError(err) + } + configInformers = append(configInformers, indexInformer) + c.configListers[schema.GroupResource{Group: gvr.Group, Resource: gvr.Resource}] = dynamiclister.New(indexInformer.GetIndexer(), gvr) + } + return configInformers +} + +func (c *clusterManagementAddonConfigController) enqueueClusterManagementAddOnsByConfig(gvr schema.GroupVersionResource) enqueueFunc { + return func(obj interface{}) { + namespaceName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error to get accessor of object: %v", obj)) + return + } + + objs, err := c.clusterManagementAddonIndexer.ByIndex(byClusterManagementAddOnConfig, fmt.Sprintf("%s/%s/%s", gvr.Group, gvr.Resource, namespaceName)) + if err != nil { + utilruntime.HandleError(fmt.Errorf("error to get addons: %v", err)) + return + } + + for _, obj := range objs { + if obj == nil { + continue + } + key, _ := cache.MetaNamespaceKeyFunc(obj) + c.queue.Add(key) + } + } +} + +func (c *clusterManagementAddonConfigController) indexByConfig(obj interface{}) ([]string, error) { + cma, ok := obj.(*addonapiv1alpha1.ClusterManagementAddOn) + if !ok { + return nil, fmt.Errorf("obj is supposed to be a 
ClusterManagementAddOn, but is %T", obj) + } + + configNames := sets.New[string]() + for _, defaultConfigRef := range cma.Status.DefaultConfigReferences { + if defaultConfigRef.DesiredConfig == nil || defaultConfigRef.DesiredConfig.Name == "" { + // bad config reference, ignore + continue + } + + configNames.Insert(getIndex(defaultConfigRef.ConfigGroupResource, *defaultConfigRef.DesiredConfig)) + } + + for _, installProgression := range cma.Status.InstallProgressions { + for _, configReference := range installProgression.ConfigReferences { + if configReference.DesiredConfig == nil || configReference.DesiredConfig.Name == "" { + // bad config reference, ignore + continue + } + + configNames.Insert(getIndex(configReference.ConfigGroupResource, *configReference.DesiredConfig)) + } + } + + return configNames.UnsortedList(), nil +} + +func (c *clusterManagementAddonConfigController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + _, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + cma, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + // addon cloud be deleted, ignore + return nil + } + if err != nil { + return err + } + + cmaCopy := cma.DeepCopy() + + if err := c.updateConfigSpecHash(cmaCopy); err != nil { + return err + } + + return c.patchConfigReferences(ctx, cma, cmaCopy) +} + +func (c *clusterManagementAddonConfigController) updateConfigSpecHash(cma *addonapiv1alpha1.ClusterManagementAddOn) error { + + for i, defaultConfigReference := range cma.Status.DefaultConfigReferences { + if defaultConfigReference.DesiredConfig == nil || defaultConfigReference.DesiredConfig.Name == "" { + continue + } + + specHash, err := c.getConfigSpecHash(defaultConfigReference.ConfigGroupResource, defaultConfigReference.DesiredConfig.ConfigReferent) + if err != nil { + return nil + } + cma.Status.DefaultConfigReferences[i].DesiredConfig.SpecHash 
= specHash + } + + for i, installProgression := range cma.Status.InstallProgressions { + for j, configReference := range installProgression.ConfigReferences { + if configReference.DesiredConfig == nil || configReference.DesiredConfig.Name == "" { + continue + } + + specHash, err := c.getConfigSpecHash(configReference.ConfigGroupResource, configReference.DesiredConfig.ConfigReferent) + if err != nil { + return nil + } + cma.Status.InstallProgressions[i].ConfigReferences[j].DesiredConfig.SpecHash = specHash + } + } + + return nil +} + +func (c *clusterManagementAddonConfigController) patchConfigReferences(ctx context.Context, old, new *addonapiv1alpha1.ClusterManagementAddOn) error { + if equality.Semantic.DeepEqual(new.Status.DefaultConfigReferences, old.Status.DefaultConfigReferences) && + equality.Semantic.DeepEqual(new.Status.InstallProgressions, old.Status.InstallProgressions) { + return nil + } + + oldData, err := json.Marshal(&addonapiv1alpha1.ClusterManagementAddOn{ + Status: addonapiv1alpha1.ClusterManagementAddOnStatus{ + DefaultConfigReferences: old.Status.DefaultConfigReferences, + InstallProgressions: old.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonapiv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonapiv1alpha1.ClusterManagementAddOnStatus{ + DefaultConfigReferences: new.Status.DefaultConfigReferences, + InstallProgressions: new.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(4).Infof("Patching addon %s/%s config reference with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = c.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( + ctx, + new.Name, + types.MergePatchType, + 
patchBytes, + metav1.PatchOptions{}, + "status", + ) + return err +} + +func (c *clusterManagementAddonConfigController) getConfigSpecHash(gr addonapiv1alpha1.ConfigGroupResource, + cr addonapiv1alpha1.ConfigReferent) (string, error) { + lister, ok := c.configListers[schema.GroupResource{Group: gr.Group, Resource: gr.Resource}] + if !ok { + return "", nil + } + + var config *unstructured.Unstructured + var err error + if cr.Namespace == "" { + config, err = lister.Get(cr.Name) + } else { + config, err = lister.Namespace(cr.Namespace).Get(cr.Name) + } + if errors.IsNotFound(err) { + return "", nil + } + if err != nil { + return "", err + } + + return GetSpecHash(config) +} + +func getIndex(configGroupResource addonapiv1alpha1.ConfigGroupResource, configSpecHash addonapiv1alpha1.ConfigSpecHash) string { + if configSpecHash.Namespace != "" { + return fmt.Sprintf("%s/%s/%s/%s", configGroupResource.Group, configGroupResource.Resource, configSpecHash.Namespace, configSpecHash.Name) + } + + return fmt.Sprintf("%s/%s/%s", configGroupResource.Group, configGroupResource.Resource, configSpecHash.Name) +} + +func GetSpecHash(obj *unstructured.Unstructured) (string, error) { + spec, ok := obj.Object["spec"] + if !ok { + return "", fmt.Errorf("object has no spec field") + } + + specBytes, err := json.Marshal(spec) + if err != nil { + return "", err + } + + hash := sha256.Sum256(specBytes) + + return fmt.Sprintf("%x", hash), nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go index 1413fd216..2010b27aa 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" + 
"open-cluster-management.io/addon-framework/pkg/utils" + jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -14,31 +16,32 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) -// addonConfigurationController reconciles instances of ManagedClusterAddon on the hub. -type addonConfigurationController struct { +// addonRegistrationController reconciles instances of ManagedClusterAddon on the hub. 
+type addonRegistrationController struct { addonClient addonv1alpha1client.Interface managedClusterLister clusterlister.ManagedClusterLister managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister agentAddons map[string]agent.AgentAddon } -func NewAddonConfigurationController( +func NewAddonRegistrationController( addonClient addonv1alpha1client.Interface, clusterInformers clusterinformers.ManagedClusterInformer, addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, agentAddons map[string]agent.AgentAddon, ) factory.Controller { - c := &addonConfigurationController{ + c := &addonRegistrationController{ addonClient: addonClient, managedClusterLister: clusterInformers.Lister(), managedClusterAddonLister: addonInformers.Lister(), @@ -62,7 +65,7 @@ func NewAddonConfigurationController( WithSync(c.sync).ToController("addon-registration-controller") } -func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { +func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { klog.V(4).Infof("Reconciling addon registration %q", key) clusterName, addonName, err := cache.SplitMetaNamespaceKey(key) @@ -92,45 +95,92 @@ func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory if err != nil { return err } + managedClusterAddonCopy := managedClusterAddon.DeepCopy() + // wait until the mca's ownerref is set. 
+ if !utils.IsOwnedByCMA(managedClusterAddonCopy) { + return nil + } + + var supportedConfigs []addonapiv1alpha1.ConfigGroupResource + for _, config := range agentAddon.GetAgentAddonOptions().SupportedConfigGVRs { + supportedConfigs = append(supportedConfigs, addonapiv1alpha1.ConfigGroupResource{ + Group: config.Group, + Resource: config.Resource, + }) + } + managedClusterAddonCopy.Status.SupportedConfigs = supportedConfigs + registrationOption := agentAddon.GetAgentAddonOptions().Registration if registrationOption == nil { - return nil + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.RegistrationAppliedNilRegistration, + Message: "Registration of the addon agent is configured", + }) + return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) } if registrationOption.PermissionConfig != nil { err = registrationOption.PermissionConfig(managedCluster, managedClusterAddon) if err != nil { + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionFalse, + Reason: addonapiv1alpha1.RegistrationAppliedSetPermissionFailed, + Message: fmt.Sprintf("Failed to set permission for hub agent: %v", err), + }) + if patchErr := c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon); patchErr != nil { + return patchErr + } return err } } if registrationOption.CSRConfigurations == nil { - return nil + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, + Status: metav1.ConditionTrue, + Reason: addonapiv1alpha1.RegistrationAppliedNilRegistration, + Message: "Registration of the addon agent is configured", + }) + return c.patchAddonStatus(ctx, managedClusterAddonCopy, 
managedClusterAddon) } configs := registrationOption.CSRConfigurations(managedCluster) managedClusterAddonCopy.Status.Registrations = configs + + managedClusterAddonCopy.Status.Namespace = registrationOption.Namespace + if len(managedClusterAddonCopy.Spec.InstallNamespace) > 0 { + managedClusterAddonCopy.Status.Namespace = managedClusterAddonCopy.Spec.InstallNamespace + } + meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ - Type: "RegistrationApplied", + Type: addonapiv1alpha1.ManagedClusterAddOnRegistrationApplied, Status: metav1.ConditionTrue, - Reason: "RegistrationConfigured", + Reason: addonapiv1alpha1.RegistrationAppliedSetPermissionApplied, Message: "Registration of the addon agent is configured", }) return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) } -func (c *addonConfigurationController) patchAddonStatus(ctx context.Context, new, old *addonapiv1alpha1.ManagedClusterAddOn) error { - if equality.Semantic.DeepEqual(new.Status, old.Status) { +func (c *addonRegistrationController) patchAddonStatus(ctx context.Context, new, old *addonapiv1alpha1.ManagedClusterAddOn) error { + if equality.Semantic.DeepEqual(new.Status.Registrations, old.Status.Registrations) && + equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) && + equality.Semantic.DeepEqual(new.Status.SupportedConfigs, old.Status.SupportedConfigs) && + new.Status.Namespace == old.Status.Namespace { return nil } oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - Registrations: old.Status.Registrations, - Conditions: old.Status.Conditions, + Registrations: old.Status.Registrations, + Namespace: old.Status.Namespace, + SupportedConfigs: old.Status.SupportedConfigs, + Conditions: old.Status.Conditions, }, }) if err != nil { @@ -143,8 +193,10 @@ func (c *addonConfigurationController) patchAddonStatus(ctx context.Context, new ResourceVersion: 
new.ResourceVersion, }, Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - Registrations: new.Status.Registrations, - Conditions: new.Status.Conditions, + Registrations: new.Status.Registrations, + Namespace: new.Status.Namespace, + SupportedConfigs: new.Status.SupportedConfigs, + Conditions: new.Status.Conditions, }, }) if err != nil { @@ -156,7 +208,8 @@ func (c *addonConfigurationController) patchAddonStatus(ctx context.Context, new return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) } - klog.Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) - _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go index 2390b1f96..6f3496f7a 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/manager.go @@ -3,9 +3,14 @@ package addonmanager import ( "context" "fmt" - addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" "time" + "k8s.io/client-go/tools/cache" + + "open-cluster-management.io/addon-framework/pkg/index" + "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration" + "open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" @@ -13,22 +18,23 @@ import ( kubeinformers 
"k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" + clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" + clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned" + workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonhealthcheck" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addoninstall" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/certificate" - "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/clustermanagement" + "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig" "open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/utils" - addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" - addoninformers "open-cluster-management.io/api/client/addon/informers/externalversions" - clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" - clusterv1informers "open-cluster-management.io/api/client/cluster/informers/externalversions" - workv1client 
"open-cluster-management.io/api/client/work/clientset/versioned" - workv1informers "open-cluster-management.io/api/client/work/informers/externalversions" ) // AddonManager is the interface to initialize a manager on hub to manage the addon @@ -101,10 +107,10 @@ func (a *addonManager) Start(ctx context.Context) error { return err } - addonNames := []string{} - for key, agent := range a.addonAgents { + var addonNames []string + for key, agentImpl := range a.addonAgents { addonNames = append(addonNames, key) - for _, configGVR := range agent.GetAgentAddonOptions().SupportedConfigGVRs { + for _, configGVR := range agentImpl.GetAgentAddonOptions().SupportedConfigGVRs { a.addonConfigs[configGVR] = true } } @@ -149,21 +155,13 @@ func (a *addonManager) Start(ctx context.Context) error { a.addonAgents, ) - registrationController := registration.NewAddonConfigurationController( + registrationController := registration.NewAddonRegistrationController( addonClient, clusterInformers.Cluster().V1().ManagedClusters(), addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), a.addonAgents, ) - clusterManagementController := clustermanagement.NewClusterManagementController( - addonClient, - clusterInformers.Cluster().V1().ManagedClusters(), - addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), - a.addonAgents, - ) - addonInstallController := addoninstall.NewAddonInstallController( addonClient, clusterInformers.Cluster().V1().ManagedClusters(), @@ -171,14 +169,16 @@ func (a *addonManager) Start(ctx context.Context) error { a.addonAgents, ) - addonHealthCheckController := addonhealthcheck.NewAddonHealthCheckController( + // This is a duplicate controller in general addon-manager. 
This should be removed when we + // alway enable the addon-manager + addonOwnerController := addonowner.NewAddonOwnerController( addonClient, addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), - workInformers.Work().V1().ManifestWorks(), - a.addonAgents, + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + utils.ManagedBySelf(a.addonAgents), ) - var addonConfigController factory.Controller + var addonConfigController, managementAddonConfigController, addonConfigurationController factory.Controller if len(a.addonConfigs) != 0 { addonConfigController = addonconfig.NewAddonConfigController( addonClient, @@ -186,6 +186,38 @@ func (a *addonManager) Start(ctx context.Context) error { dynamicInformers, a.addonConfigs, ) + managementAddonConfigController = managementaddonconfig.NewManagementAddonConfigController( + addonClient, + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + dynamicInformers, + a.addonConfigs, + ) + + // start addonConfiguration controller, note this is to handle the case when the general addon-manager + // is not started, we should consider to remove this when the general addon-manager are always started. + // This controller will also ignore the installStrategy part. 
+ err = addonInformers.Addon().V1alpha1().ClusterManagementAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ClusterManagementAddonByPlacement: index.IndexClusterManagementAddonByPlacement, + }) + if err != nil { + return err + } + + err = addonInformers.Addon().V1alpha1().ManagedClusterAddOns().Informer().AddIndexers( + cache.Indexers{ + index.ManagedClusterAddonByName: index.IndexManagedClusterAddonByName, + }) + if err != nil { + return err + } + addonConfigurationController = addonconfiguration.NewAddonConfigurationController( + addonClient, + addonInformers.Addon().V1alpha1().ManagedClusterAddOns(), + addonInformers.Addon().V1alpha1().ClusterManagementAddOns(), + nil, nil, + utils.ManagedBySelf(a.addonAgents), + ) } var csrApproveController factory.Controller @@ -231,12 +263,18 @@ func (a *addonManager) Start(ctx context.Context) error { go deployController.Run(ctx, 1) go registrationController.Run(ctx, 1) - go clusterManagementController.Run(ctx, 1) go addonInstallController.Run(ctx, 1) - go addonHealthCheckController.Run(ctx, 1) + + go addonOwnerController.Run(ctx, 1) if addonConfigController != nil { go addonConfigController.Run(ctx, 1) } + if managementAddonConfigController != nil { + go managementAddonConfigController.Run(ctx, 1) + } + if addonConfigurationController != nil { + go addonConfigurationController.Run(ctx, 1) + } if csrApproveController != nil { go csrApproveController.Run(ctx, 1) } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go b/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go index 5ac76fa99..372c3fbcc 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/agent/inteface.go @@ -55,6 +55,11 @@ type AgentAddonOptions struct { // +optional InstallStrategy *InstallStrategy + // Updaters select a set of resources and define the strategies to update them. 
+ // UpdateStrategy is Update if no Updater is defined for a resource. + // +optional + Updaters []Updater + // HealthProber defines how is the healthiness status of the ManagedClusterAddon probed. // Note that the prescribed prober type here only applies to the automatically installed // addons configured via InstallStrategy. @@ -89,6 +94,10 @@ type RegistrationOption struct { // +required CSRConfigurations func(cluster *clusterv1.ManagedCluster) []addonapiv1alpha1.RegistrationConfig + // Namespace is the namespace where registraiton credential will be put on the managed cluster. It + // will be overridden by installNamespace on ManagedClusterAddon spec if set + Namespace string + // CSRApproveCheck checks whether the addon agent registration should be approved by the hub. // Addon hub controller can implement this func to auto-approve all the CSRs. A better CSR check is // recommended to include (1) the validity of requester's requesting identity and (2) the other request @@ -133,6 +142,14 @@ func (s *InstallStrategy) GetManagedClusterFilter() func(cluster *clusterv1.Mana return s.managedClusterFilter } +type Updater struct { + // ResourceIdentifier sets what resources the strategy applies to + ResourceIdentifier workapiv1.ResourceIdentifier + + // UpdateStrategy defines the strategy used to update the manifests. 
+ UpdateStrategy workapiv1.UpdateStrategy +} + type HealthProber struct { Type HealthProberType @@ -151,7 +168,7 @@ type WorkHealthProber struct { // ProbeField defines the field of a resource to be probed type ProbeField struct { - // ResourceIdentifier sets what resource shoule be probed + // ResourceIdentifier sets what resource should be probed ResourceIdentifier workapiv1.ResourceIdentifier // ProbeRules sets the rules to probe the field diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go b/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go index 9073fe5f0..008fc0e07 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/assets/assets.go @@ -126,7 +126,7 @@ func LoadFilesRecursively(dir string, predicates ...FileInfoPredicate) (map[stri } } - bs, err := os.ReadFile(path) + bs, err := os.ReadFile(filepath.Clean(path)) if err != nil { return err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/index/index.go b/vendor/open-cluster-management.io/addon-framework/pkg/index/index.go new file mode 100644 index 000000000..ac8015fdc --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/index/index.go @@ -0,0 +1,103 @@ +package index + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" +) + +const ( + ClusterManagementAddonByPlacement = "clusterManagementAddonByPlacement" + ManagedClusterAddonByName = "managedClusterAddonByName" +) + +func IndexClusterManagementAddonByPlacement(obj interface{}) ([]string, error) 
{ + cma, ok := obj.(*addonv1alpha1.ClusterManagementAddOn) + + if !ok { + return []string{}, fmt.Errorf("obj %T is not a ClusterManagementAddon", obj) + } + + var keys []string + if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual { + return keys, nil + } + + for _, placement := range cma.Spec.InstallStrategy.Placements { + key := fmt.Sprintf("%s/%s", placement.PlacementRef.Namespace, placement.PlacementRef.Name) + keys = append(keys, key) + } + + return keys, nil +} + +func IndexManagedClusterAddonByName(obj interface{}) ([]string, error) { + mca, ok := obj.(*addonv1alpha1.ManagedClusterAddOn) + + if !ok { + return []string{}, fmt.Errorf("obj %T is not a ManagedClusterAddon", obj) + } + + return []string{mca.Name}, nil +} + +func ClusterManagementAddonByPlacementQueueKey( + cmai addoninformerv1alpha1.ClusterManagementAddOnInformer) func(obj runtime.Object) []string { + return func(obj runtime.Object) []string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return []string{} + } + + objs, err := cmai.Informer().GetIndexer().ByIndex(ClusterManagementAddonByPlacement, key) + if err != nil { + utilruntime.HandleError(err) + return []string{} + } + + var keys []string + for _, o := range objs { + cma := o.(*addonv1alpha1.ClusterManagementAddOn) + klog.V(4).Infof("enqueue ClusterManagementAddon %s, because of placement %s", cma.Name, key) + keys = append(keys, cma.Name) + } + + return keys + } +} + +func ClusterManagementAddonByPlacementDecisionQueueKey( + cmai addoninformerv1alpha1.ClusterManagementAddOnInformer) func(obj runtime.Object) []string { + return func(obj runtime.Object) []string { + accessor, _ := meta.Accessor(obj) + placementName, ok := accessor.GetLabels()[clusterv1beta1.PlacementLabel] + if !ok { + return []string{} + } + + objs, err := cmai.Informer().GetIndexer().ByIndex(ClusterManagementAddonByPlacement, + 
fmt.Sprintf("%s/%s", accessor.GetNamespace(), placementName)) + if err != nil { + utilruntime.HandleError(err) + return []string{} + } + + var keys []string + for _, o := range objs { + cma := o.(*addonv1alpha1.ClusterManagementAddOn) + klog.V(4).Infof("enqueue ClusterManagementAddon %s, because of placementDecision %s/%s", + cma.Name, accessor.GetNamespace(), accessor.GetName()) + keys = append(keys, cma.Name) + } + + return keys + } +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/lease/lease_controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/lease/lease_controller.go index 4c570e897..d298298cf 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/lease/lease_controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/lease/lease_controller.go @@ -2,6 +2,7 @@ package lease import ( "context" + "net/http" "time" coordinationv1 "k8s.io/api/coordination/v1" @@ -9,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" @@ -75,7 +77,7 @@ func (r *leaseUpdater) updateLease(ctx context.Context, namespace string, client lease, err := client.CoordinationV1().Leases(namespace).Get(ctx, r.leaseName, metav1.GetOptions{}) switch { case errors.IsNotFound(err): - //create lease + // create lease lease := &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: r.leaseName, @@ -94,7 +96,7 @@ func (r *leaseUpdater) updateLease(ctx context.Context, namespace string, client case err != nil: return err default: - //update lease + // update lease lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()} if _, err = client.CoordinationV1().Leases(namespace).Update(context.TODO(), lease, metav1.UpdateOptions{}); err != nil { return err @@ -146,3 +148,24 @@ func CheckAddonPodFunc(podGetter 
corev1client.PodsGetter, namespace, labelSelect } } + +// CheckManagedClusterHealthFunc checks the health status of the cluster api server +func CheckManagedClusterHealthFunc(managedClusterDiscoveryClient discovery.DiscoveryInterface) func() bool { + return func() bool { + statusCode := 0 + _ = managedClusterDiscoveryClient.RESTClient().Get().AbsPath("/livez").Do(context.TODO()).StatusCode(&statusCode) + if statusCode == http.StatusOK { + return true + } + + // for backward compatible, the livez endpoint is supported from Kubernetes 1.16, so if the livez is not found or + // forbidden, the healthz endpoint will be used. + if statusCode == http.StatusNotFound || statusCode == http.StatusForbidden { + _ = managedClusterDiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do(context.TODO()).StatusCode(&statusCode) + if statusCode == http.StatusOK { + return true + } + } + return false + } +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go new file mode 100644 index 000000000..d4285fb58 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/addon_configuration_reconciler.go @@ -0,0 +1,114 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type managedClusterAddonConfigurationReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d 
*managedClusterAddonConfigurationReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + + for _, addon := range graph.addonToUpdate() { + mca := d.mergeAddonConfig(addon.mca, addon.desiredConfigs) + err := d.patchAddonStatus(ctx, mca, addon.mca) + if err != nil { + errs = append(errs, err) + } + } + + return cma, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *managedClusterAddonConfigurationReconciler) mergeAddonConfig( + mca *addonv1alpha1.ManagedClusterAddOn, desiredConfigMap addonConfigMap) *addonv1alpha1.ManagedClusterAddOn { + mcaCopy := mca.DeepCopy() + + var mergedConfigs []addonv1alpha1.ConfigReference + // remove configs that are not desired + for _, config := range mcaCopy.Status.ConfigReferences { + if _, ok := desiredConfigMap[config.ConfigGroupResource]; ok { + mergedConfigs = append(mergedConfigs, config) + } + } + + // append or update configs + for _, config := range desiredConfigMap { + var match bool + for i := range mergedConfigs { + if mergedConfigs[i].ConfigGroupResource != config.ConfigGroupResource { + continue + } + + match = true + // set LastObservedGeneration to 0 when config name/namespace changes + if mergedConfigs[i].DesiredConfig != nil && (mergedConfigs[i].DesiredConfig.ConfigReferent != config.DesiredConfig.ConfigReferent) { + mergedConfigs[i].LastObservedGeneration = 0 + } + mergedConfigs[i].ConfigReferent = config.ConfigReferent + mergedConfigs[i].DesiredConfig = config.DesiredConfig.DeepCopy() + } + + if !match { + mergedConfigs = append(mergedConfigs, config) + } + } + + mcaCopy.Status.ConfigReferences = mergedConfigs + return mcaCopy +} + +func (d *managedClusterAddonConfigurationReconciler) patchAddonStatus(ctx context.Context, new, old *addonv1alpha1.ManagedClusterAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + 
oldData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{ + Status: addonv1alpha1.ManagedClusterAddOnStatus{ + Namespace: old.Status.Namespace, + ConfigReferences: old.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonv1alpha1.ManagedClusterAddOnStatus{ + Namespace: new.Status.Namespace, + ConfigReferences: new.Status.ConfigReferences, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go new file mode 100644 index 000000000..0266960e6 --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/controller.go @@ -0,0 +1,211 @@ +package addonconfiguration + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + 
addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + clusterinformersv1beta1 "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1beta1" + clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" +) + +// addonConfigurationController is a controller to update configuration of mca with the following order +// 1. use configuration in mca spec if it is set +// 2. use configuration in install strategy +// 3. use configuration in the default configuration in cma +type addonConfigurationController struct { + addonClient addonv1alpha1client.Interface + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + clusterManagementAddonIndexer cache.Indexer + managedClusterAddonIndexer cache.Indexer + addonFilterFunc factory.EventFilterFunc + placementLister clusterlisterv1beta1.PlacementLister + placementDecisionLister clusterlisterv1beta1.PlacementDecisionLister + + reconcilers []addonConfigurationReconcile +} + +type addonConfigurationReconcile interface { + reconcile(ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, + graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) +} + +type reconcileState int64 + +const ( + reconcileStop reconcileState = iota + reconcileContinue +) + +func NewAddonConfigurationController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + placementInformer clusterinformersv1beta1.PlacementInformer, + placementDecisionInformer clusterinformersv1beta1.PlacementDecisionInformer, + addonFilterFunc factory.EventFilterFunc, +) 
factory.Controller { + c := &addonConfigurationController{ + addonClient: addonClient, + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + clusterManagementAddonIndexer: clusterManagementAddonInformers.Informer().GetIndexer(), + managedClusterAddonIndexer: addonInformers.Informer().GetIndexer(), + addonFilterFunc: addonFilterFunc, + } + + c.reconcilers = []addonConfigurationReconcile{ + &managedClusterAddonConfigurationReconciler{ + addonClient: addonClient, + }, + &clusterManagementAddonProgressingReconciler{ + addonClient: addonClient, + }, + } + + controllerFactory := factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, + clusterManagementAddonInformers.Informer()).WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()) + + // This is to handle the case the self managed addon-manager does not have placementInformer/placementDecisionInformer. + // we will not consider installStrategy related placement for self managed addon-manager. + if placementInformer != nil && placementDecisionInformer != nil { + controllerFactory = controllerFactory.WithInformersQueueKeysFunc( + index.ClusterManagementAddonByPlacementDecisionQueueKey(clusterManagementAddonInformers), placementDecisionInformer.Informer()). 
+ WithInformersQueueKeysFunc(index.ClusterManagementAddonByPlacementQueueKey(clusterManagementAddonInformers), placementInformer.Informer()) + c.placementLister = placementInformer.Lister() + c.placementDecisionLister = placementDecisionInformer.Lister() + } + + return controllerFactory.WithSync(c.sync).ToController("addon-configuration-controller") +} + +func (c *addonConfigurationController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + _, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + klog.V(4).Infof("Reconciling addon %q", addonName) + + cma, err := c.clusterManagementAddonLister.Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + if !c.addonFilterFunc(cma) { + return nil + } + + cma = cma.DeepCopy() + graph, err := c.buildConfigurationGraph(cma) + if err != nil { + return err + } + + var state reconcileState + var errs []error + for _, reconciler := range c.reconcilers { + cma, state, err = reconciler.reconcile(ctx, cma, graph) + if err != nil { + errs = append(errs, err) + } + if state == reconcileStop { + break + } + } + + return utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) buildConfigurationGraph(cma *addonv1alpha1.ClusterManagementAddOn) (*configurationGraph, error) { + graph := newGraph(cma.Spec.SupportedConfigs, cma.Status.DefaultConfigReferences) + addons, err := c.managedClusterAddonIndexer.ByIndex(index.ManagedClusterAddonByName, cma.Name) + if err != nil { + return graph, err + } + + // add all existing addons to the default at first + for _, addonObject := range addons { + addon := addonObject.(*addonv1alpha1.ManagedClusterAddOn) + graph.addAddonNode(addon) + } + + if cma.Spec.InstallStrategy.Type == "" || cma.Spec.InstallStrategy.Type == addonv1alpha1.AddonInstallStrategyManual { + return graph, nil + } + + // check each install strategy in status + 
var errs []error + for _, installProgression := range cma.Status.InstallProgressions { + clusters, err := c.getClustersByPlacement(installProgression.PlacementRef.Name, installProgression.PlacementRef.Namespace) + if errors.IsNotFound(err) { + klog.V(2).Infof("placement %s/%s is not found for addon %s", installProgression.PlacementRef.Namespace, installProgression.PlacementRef.Name, cma.Name) + continue + } + if err != nil { + errs = append(errs, err) + continue + } + + for _, installStrategy := range cma.Spec.InstallStrategy.Placements { + if installStrategy.PlacementRef == installProgression.PlacementRef { + graph.addPlacementNode(installStrategy, installProgression, clusters) + + } + } + } + + return graph, utilerrors.NewAggregate(errs) +} + +func (c *addonConfigurationController) getClustersByPlacement(name, namespace string) ([]string, error) { + var clusters []string + if c.placementLister == nil || c.placementDecisionLister == nil { + return clusters, nil + } + _, err := c.placementLister.Placements(namespace).Get(name) + if err != nil { + return clusters, err + } + + decisionSelector := labels.SelectorFromSet(labels.Set{ + clusterv1beta1.PlacementLabel: name, + }) + decisions, err := c.placementDecisionLister.PlacementDecisions(namespace).List(decisionSelector) + if err != nil { + return clusters, err + } + + for _, d := range decisions { + for _, sd := range d.Status.Decisions { + clusters = append(clusters, sd.ClusterName) + } + } + + return clusters, nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go new file mode 100644 index 000000000..8c9705a4c --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go @@ -0,0 +1,328 @@ +package addonconfiguration + +import ( + "fmt" + "math" + "sort" + "strconv" + + 
"k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +var ( + defaultMaxConcurrency = intstr.FromString("25%") + maxMaxConcurrency = intstr.FromString("100%") +) + +// configurationTree is a 2 level snapshot tree on the configuration of addons +// the first level is a list of nodes that represents a install strategy and a desired configuration for this install +// strategy. The second level is a list of nodes that represent each mca and its desired configuration +type configurationGraph struct { + // nodes maintains a list between a installStrategy and its related mcas + nodes []*installStrategyNode + // defaults is the nodes with no install strategy + defaults *installStrategyNode +} + +// installStrategyNode is a node in configurationGraph defined by a install strategy +type installStrategyNode struct { + placementRef addonv1alpha1.PlacementRef + maxConcurrency intstr.IntOrString + desiredConfigs addonConfigMap + // children keeps a map of addons node as the children of this node + children map[string]*addonNode + clusters sets.Set[string] +} + +// addonNode is node as a child of installStrategy node represting a mca +// addonnode +type addonNode struct { + desiredConfigs addonConfigMap + mca *addonv1alpha1.ManagedClusterAddOn + // record mca upgrade status + mcaUpgradeStatus upgradeStatus +} + +type upgradeStatus int + +const ( + // mca desired configs not synced from desiredConfigs yet + toupgrade upgradeStatus = iota + // mca desired configs upgraded and last applied configs not upgraded + upgrading + // both desired configs and last applied configs are upgraded + upgraded +) + +type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference + +// set addon upgrade status +func (n *addonNode) setUpgradeStatus() { + if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) { + n.mcaUpgradeStatus = 
toupgrade + return + } + + for _, actual := range n.mca.Status.ConfigReferences { + if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok { + if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) { + n.mcaUpgradeStatus = toupgrade + return + } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) { + n.mcaUpgradeStatus = upgrading + return + } + } else { + n.mcaUpgradeStatus = toupgrade + return + } + } + + n.mcaUpgradeStatus = upgraded +} + +func (d addonConfigMap) copy() addonConfigMap { + output := addonConfigMap{} + for k, v := range d { + output[k] = v + } + return output +} + +func newGraph(supportedConfigs []addonv1alpha1.ConfigMeta, defaultConfigReferences []addonv1alpha1.DefaultConfigReference) *configurationGraph { + graph := &configurationGraph{ + nodes: []*installStrategyNode{}, + defaults: &installStrategyNode{ + maxConcurrency: maxMaxConcurrency, + desiredConfigs: map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference{}, + children: map[string]*addonNode{}, + }, + } + + // init graph.defaults.desiredConfigs with supportedConfigs + for _, config := range supportedConfigs { + if config.DefaultConfig != nil { + graph.defaults.desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: *config.DefaultConfig, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: *config.DefaultConfig, + }, + } + } + } + // copy the spechash from cma status defaultConfigReferences + for _, configRef := range defaultConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + defaultsDesiredConfig, ok := graph.defaults.desiredConfigs[configRef.ConfigGroupResource] + if ok && (defaultsDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + defaultsDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + + return graph +} + +// 
addAddonNode to the graph, starting from placement with the highest order +func (g *configurationGraph) addAddonNode(mca *addonv1alpha1.ManagedClusterAddOn) { + for i := len(g.nodes) - 1; i >= 0; i-- { + if g.nodes[i].clusters.Has(mca.Namespace) { + g.nodes[i].addNode(mca) + return + } + } + + g.defaults.addNode(mca) +} + +// addNode delete clusters on existing graph so the new configuration overrides the previous +func (g *configurationGraph) addPlacementNode( + installStrategy addonv1alpha1.PlacementStrategy, + installProgression addonv1alpha1.InstallProgression, + clusters []string, +) { + placementRef := installProgression.PlacementRef + installConfigReference := installProgression.ConfigReferences + + node := &installStrategyNode{ + placementRef: placementRef, + maxConcurrency: maxMaxConcurrency, + desiredConfigs: g.defaults.desiredConfigs, + children: map[string]*addonNode{}, + clusters: sets.New[string](clusters...), + } + + // set max concurrency + if installStrategy.RolloutStrategy.Type == addonv1alpha1.AddonRolloutStrategyRollingUpdate { + if installStrategy.RolloutStrategy.RollingUpdate != nil { + node.maxConcurrency = installStrategy.RolloutStrategy.RollingUpdate.MaxConcurrency + } else { + node.maxConcurrency = defaultMaxConcurrency + } + } + + // overrides configuration by install strategy + if len(installConfigReference) > 0 { + node.desiredConfigs = node.desiredConfigs.copy() + for _, configRef := range installConfigReference { + if configRef.DesiredConfig == nil { + continue + } + node.desiredConfigs[configRef.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: configRef.ConfigGroupResource, + ConfigReferent: configRef.DesiredConfig.ConfigReferent, + DesiredConfig: configRef.DesiredConfig.DeepCopy(), + } + } + } + + // remove addon in defaults and other placements. 
+ for _, cluster := range clusters { + if _, ok := g.defaults.children[cluster]; ok { + node.addNode(g.defaults.children[cluster].mca) + delete(g.defaults.children, cluster) + } + for _, placement := range g.nodes { + if _, ok := placement.children[cluster]; ok { + node.addNode(placement.children[cluster].mca) + delete(placement.children, cluster) + } + } + } + g.nodes = append(g.nodes, node) +} + +func (g *configurationGraph) getPlacementNodes() map[addonv1alpha1.PlacementRef]*installStrategyNode { + placementNodeMap := map[addonv1alpha1.PlacementRef]*installStrategyNode{} + for _, node := range g.nodes { + placementNodeMap[node.placementRef] = node + } + + return placementNodeMap +} + +func (g *configurationGraph) addonToUpdate() []*addonNode { + var addons []*addonNode + for _, node := range g.nodes { + addons = append(addons, node.addonToUpdate()...) + } + + addons = append(addons, g.defaults.addonToUpdate()...) + + return addons +} + +func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn) { + n.children[addon.Namespace] = &addonNode{ + mca: addon, + desiredConfigs: n.desiredConfigs, + } + + // override configuration by mca spec + if len(addon.Spec.Configs) > 0 { + n.children[addon.Namespace].desiredConfigs = n.children[addon.Namespace].desiredConfigs.copy() + // TODO we should also filter out the configs which are not supported configs. 
+ for _, config := range addon.Spec.Configs { + n.children[addon.Namespace].desiredConfigs[config.ConfigGroupResource] = addonv1alpha1.ConfigReference{ + ConfigGroupResource: config.ConfigGroupResource, + ConfigReferent: config.ConfigReferent, + DesiredConfig: &addonv1alpha1.ConfigSpecHash{ + ConfigReferent: config.ConfigReferent, + }, + } + // copy the spechash from mca status + for _, configRef := range addon.Status.ConfigReferences { + if configRef.DesiredConfig == nil { + continue + } + nodeDesiredConfig, ok := n.children[addon.Namespace].desiredConfigs[configRef.ConfigGroupResource] + if ok && (nodeDesiredConfig.DesiredConfig.ConfigReferent == configRef.DesiredConfig.ConfigReferent) { + nodeDesiredConfig.DesiredConfig.SpecHash = configRef.DesiredConfig.SpecHash + } + } + } + } + + // set addon node upgrade status + n.children[addon.Namespace].setUpgradeStatus() +} + +func (n *installStrategyNode) addonUpgraded() int { + count := 0 + for _, addon := range n.children { + if addon.mcaUpgradeStatus == upgraded { + count += 1 + } + } + return count +} + +func (n *installStrategyNode) addonUpgrading() int { + count := 0 + for _, addon := range n.children { + if addon.mcaUpgradeStatus == upgrading { + count += 1 + } + } + return count +} + +// addonToUpdate finds the addons to be updated by placement +func (n *installStrategyNode) addonToUpdate() []*addonNode { + var addons []*addonNode + + // sort the children by key + keys := make([]string, 0, len(n.children)) + for k := range n.children { + keys = append(keys, k) + } + sort.Strings(keys) + + total := len(n.clusters) + if total == 0 { + total = len(n.children) + } + + length, _ := parseMaxConcurrency(n.maxConcurrency, total) + if length == 0 { + return addons + } + + for i, k := range keys { + if (i%length == 0) && len(addons) > 0 { + return addons + } + + addon := n.children[k] + if addon.mcaUpgradeStatus != upgraded { + addons = append(addons, addon) + } + } + + return addons +} + +func 
parseMaxConcurrency(maxConcurrency intstr.IntOrString, total int) (int, error) { + var length int + + switch maxConcurrency.Type { + case intstr.String: + str := maxConcurrency.StrVal + f, err := strconv.ParseFloat(str[:len(str)-1], 64) + if err != nil { + return length, err + } + length = int(math.Ceil(f / 100 * float64(total))) + case intstr.Int: + length = maxConcurrency.IntValue() + default: + return length, fmt.Errorf("incorrect MaxConcurrency type %v", maxConcurrency.Type) + } + + return length, nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go new file mode 100644 index 000000000..4c44d700e --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/mgmt_addon_progressing_reconciler.go @@ -0,0 +1,139 @@ +package addonconfiguration + +import ( + "context" + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" +) + +type clusterManagementAddonProgressingReconciler struct { + addonClient addonv1alpha1client.Interface +} + +func (d *clusterManagementAddonProgressingReconciler) reconcile( + ctx context.Context, cma *addonv1alpha1.ClusterManagementAddOn, graph *configurationGraph) (*addonv1alpha1.ClusterManagementAddOn, reconcileState, error) { + var errs []error + cmaCopy := cma.DeepCopy() + placementNodes := graph.getPlacementNodes() + + // go through addons and update condition per 
install progression + for i, installProgression := range cmaCopy.Status.InstallProgressions { + placementNode, exist := placementNodes[installProgression.PlacementRef] + if !exist { + continue + } + + isUpgrade := false + + for _, configReference := range installProgression.ConfigReferences { + if configReference.LastAppliedConfig != nil { + isUpgrade = true + break + } + } + + setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i], + isUpgrade, + placementNode.addonUpgrading(), + placementNode.addonUpgraded(), + len(placementNode.clusters), + ) + } + + err := d.patchMgmtAddonStatus(ctx, cmaCopy, cma) + if err != nil { + errs = append(errs, err) + } + return cmaCopy, reconcileContinue, utilerrors.NewAggregate(errs) +} + +func (d *clusterManagementAddonProgressingReconciler) patchMgmtAddonStatus(ctx context.Context, new, old *addonv1alpha1.ClusterManagementAddOn) error { + if equality.Semantic.DeepEqual(new.Status, old.Status) { + return nil + } + + oldData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: old.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + newData, err := json.Marshal(&addonv1alpha1.ClusterManagementAddOn{ + ObjectMeta: metav1.ObjectMeta{ + UID: new.UID, + ResourceVersion: new.ResourceVersion, + }, + Status: addonv1alpha1.ClusterManagementAddOnStatus{ + InstallProgressions: new.Status.InstallProgressions, + }, + }) + if err != nil { + return err + } + + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + if err != nil { + return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) + } + + klog.V(2).Infof("Patching clustermanagementaddon %s status with %s", new.Name, string(patchBytes)) + _, err = d.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + return err +} + +func 
setAddOnInstallProgressionsAndLastApplied(installProgression *addonv1alpha1.InstallProgression, isUpgrade bool, progressing, done, total int) { + // always update progressing condition when there is no config + // skip update progressing condition when last applied config already the same as desired + skip := len(installProgression.ConfigReferences) > 0 + for _, configReference := range installProgression.ConfigReferences { + if !equality.Semantic.DeepEqual(configReference.LastAppliedConfig, configReference.DesiredConfig) && + !equality.Semantic.DeepEqual(configReference.LastKnownGoodConfig, configReference.DesiredConfig) { + skip = false + } + } + if skip { + return + } + condition := metav1.Condition{ + Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, + } + if (total == 0 && done == 0) || (done != total) { + condition.Status = metav1.ConditionTrue + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgrading + condition.Message = fmt.Sprintf("%d/%d upgrading...", progressing+done, total) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstalling + condition.Message = fmt.Sprintf("%d/%d installing...", progressing+done, total) + } + } else { + for i, configRef := range installProgression.ConfigReferences { + installProgression.ConfigReferences[i].LastAppliedConfig = configRef.DesiredConfig.DeepCopy() + installProgression.ConfigReferences[i].LastKnownGoodConfig = configRef.DesiredConfig.DeepCopy() + } + condition.Status = metav1.ConditionFalse + if isUpgrade { + condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed + condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors.", done, total) + } else { + condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed + condition.Message = fmt.Sprintf("%d/%d install completed with no errors.", done, total) + } + } + meta.SetStatusCondition(&installProgression.Conditions, condition) +} diff --git 
a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go new file mode 100644 index 000000000..beff5b4bd --- /dev/null +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner/controller.go @@ -0,0 +1,100 @@ +package addonowner + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" + addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" + addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/utils" +) + +const UnsupportedConfigurationType = "UnsupportedConfiguration" + +// addonOwnerController reconciles instances of managedclusteradd on the hub +// to add related ClusterManagementAddon as the owner. 
+type addonOwnerController struct { + addonClient addonv1alpha1client.Interface + managedClusterAddonLister addonlisterv1alpha1.ManagedClusterAddOnLister + clusterManagementAddonLister addonlisterv1alpha1.ClusterManagementAddOnLister + addonFilterFunc factory.EventFilterFunc +} + +func NewAddonOwnerController( + addonClient addonv1alpha1client.Interface, + addonInformers addoninformerv1alpha1.ManagedClusterAddOnInformer, + clusterManagementAddonInformers addoninformerv1alpha1.ClusterManagementAddOnInformer, + addonFilterFunc factory.EventFilterFunc, +) factory.Controller { + c := &addonOwnerController{ + addonClient: addonClient, + managedClusterAddonLister: addonInformers.Lister(), + clusterManagementAddonLister: clusterManagementAddonInformers.Lister(), + addonFilterFunc: addonFilterFunc, + } + + return factory.New().WithFilteredEventsInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + c.addonFilterFunc, clusterManagementAddonInformers.Informer()). 
+ WithInformersQueueKeysFunc( + func(obj runtime.Object) []string { + key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + return []string{key} + }, + addonInformers.Informer()).WithSync(c.sync).ToController("addon-owner-controller") +} + +func (c *addonOwnerController) sync(ctx context.Context, syncCtx factory.SyncContext, key string) error { + klog.V(4).Infof("Reconciling addon %q", key) + + namespace, addonName, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + // ignore addon whose key is invalid + return nil + } + + addon, err := c.managedClusterAddonLister.ManagedClusterAddOns(namespace).Get(addonName) + switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return err + } + + addonCopy := addon.DeepCopy() + modified := false + + clusterManagementAddon, err := c.clusterManagementAddonLister.Get(addonName) + if errors.IsNotFound(err) { + return nil + } + + if err != nil { + return err + } + + if !c.addonFilterFunc(clusterManagementAddon) { + return nil + } + + owner := metav1.NewControllerRef(clusterManagementAddon, addonapiv1alpha1.GroupVersion.WithKind("ClusterManagementAddOn")) + modified = utils.MergeOwnerRefs(&addonCopy.OwnerReferences, *owner, false) + if modified { + _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(namespace).Update(ctx, addonCopy, metav1.UpdateOptions{}) + return err + } + + return nil +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go index 8e122be20..bbfba2ffe 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/config_checker.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" "os" + "path/filepath" "sync" "k8s.io/apiserver/pkg/server/healthz" @@ -31,7 +32,9 @@ type configChecker struct { // Case1: Embeding configchecker into the current server // // In this case, we 
simply initialize a configchecker and add it to the current in used healthz.Checkers. -// You can check here for a reference: https://github.com/open-cluster-management/multicloud-operators-foundation/blob/56270b1520ec5896981db689b3afe0cd893cad8e/cmd/agent/agent.go#L148 +// You can check here for a reference: +// +// https://github.com/open-cluster-management/multicloud-operators-foundation/blob/56270b1520ec5896981db689b3afe0cd893cad8e/cmd/agent/agent.go#L148 // // ----------------------------------------------------------------------------- // @@ -85,8 +88,9 @@ func NewConfigChecker(name string, configfiles ...string) (*configChecker, error } // SetReload can update the ‘reload’ fields of config checker -// If reload equals to false, config checker won't update the checksum value in the cache, and function Check would return error forever if config files are modified. -// but if reload equals to true, config checker only returns err once, and it updates the cache with the latest checksum of config files. +// If reload equals to false, config checker won't update the checksum value in the cache, and function Check would +// return error forever if config files are modified. but if reload equals to true, config checker only returns err +// once, and it updates the cache with the latest checksum of config files. func (c *configChecker) SetReload(reload bool) { c.reload = reload } @@ -98,7 +102,8 @@ func (c *configChecker) Name() string { // Check would return nil if current configfiles's checksum is equal to cached checksum // If checksum not equal, it will return err and update cached checksum with current checksum -// Note that: configChecker performs a instant update after it returns err, so DO NOT use one configChecker for multible containers!!! +// Note that: configChecker performs a instant update after it returns err, so DO NOT use one +// configChecker for multible containers!!! 
func (cc *configChecker) Check(_ *http.Request) error { newChecksum, err := load(cc.configfiles) if err != nil { @@ -119,7 +124,7 @@ func (cc *configChecker) Check(_ *http.Request) error { func load(configfiles []string) ([32]byte, error) { var allContent []byte for _, c := range configfiles { - content, err := os.ReadFile(c) + content, err := os.ReadFile(filepath.Clean(c)) if err != nil { return [32]byte{}, fmt.Errorf("read %s failed, %v", c, err) } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go index 493fbc101..037898928 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/csr_helpers.go @@ -18,9 +18,10 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/restmapper" "k8s.io/klog/v2" - "open-cluster-management.io/addon-framework/pkg/agent" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" ) var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) @@ -38,7 +39,7 @@ func DefaultSignerWithExpiry(caKey, caData []byte, duration time.Duration) agent blockTlsKey, _ := pem.Decode(caKey) // For now only PKCS#1 is supported which assures the private key algorithm is RSA. - // TODO: Compatiblity w/ PKCS#8 key e.g. EC algorithm + // TODO: Compatibility w/ PKCS#8 key e.g. 
EC algorithm key, err := x509.ParsePKCS1PrivateKey(blockTlsKey.Bytes) if err != nil { klog.Errorf("Failed to parse key: %v", err) diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go index f803e4b90..edc26e4e9 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/helpers.go @@ -9,17 +9,20 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch" - addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/klog/v2" + addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" + + "open-cluster-management.io/addon-framework/pkg/agent" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" ) func MergeRelatedObjects(modified *bool, objs *[]addonapiv1alpha1.ObjectReference, obj addonapiv1alpha1.ObjectReference) { @@ -161,7 +164,7 @@ func ApplySecret(ctx context.Context, client coreclientv1.SecretsGetter, require actual, err = client.Secrets(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) if err == nil { - return actual, true, err + return actual, true, nil } if !strings.Contains(err.Error(), "field is immutable") { return actual, true, err @@ -279,6 +282,60 @@ func PatchAddonCondition(ctx context.Context, addonClient addonv1alpha1client.In } klog.V(2).Infof("Patching addon %s/%s condition with %s", new.Namespace, new.Name, string(patchBytes)) - _, err = 
addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch(ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + _, err = addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( + ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } + +// AddonManagementFilterFunc is to check if the addon should be managed by addon manager or self-managed +type AddonManagementFilterFunc func(cma *addonapiv1alpha1.ClusterManagementAddOn) bool + +func ManagedByAddonManager(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + annotations := accessor.GetAnnotations() + if len(annotations) == 0 { + return false + } + + value, ok := annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] + if !ok { + return false + } + + return value == addonapiv1alpha1.AddonLifecycleAddonManagerAnnotationValue +} + +func ManagedBySelf(agentAddons map[string]agent.AgentAddon) factory.EventFilterFunc { + return func(obj interface{}) bool { + accessor, _ := meta.Accessor(obj) + if _, ok := agentAddons[accessor.GetName()]; !ok { + return false + } + + annotations := accessor.GetAnnotations() + + if len(annotations) == 0 { + return true + } + + value, ok := annotations[addonapiv1alpha1.AddonLifecycleAnnotationKey] + if !ok { + return true + } + + return value == addonapiv1alpha1.AddonLifecycleSelfManageAnnotationValue + } +} + +func IsOwnedByCMA(addon *addonapiv1alpha1.ManagedClusterAddOn) bool { + for _, owner := range addon.OwnerReferences { + if owner.Kind != "ClusterManagementAddOn" { + continue + } + if owner.Name != addon.Name { + continue + } + return true + } + return false +} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go index 6c26d3ddb..8108fd034 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go +++ 
b/vendor/open-cluster-management.io/addon-framework/pkg/utils/permission.go @@ -11,9 +11,14 @@ import ( "k8s.io/client-go/kubernetes" rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/utils/pointer" - "open-cluster-management.io/addon-framework/pkg/agent" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" +) + +const ( + RoleRefKindUser = "User" ) // RBACPermissionBuilder builds a agent.PermissionConfigFunc that applies Kubernetes RBAC policies. @@ -230,7 +235,9 @@ func ApplyClusterRole(ctx context.Context, client rbacclientv1.ClusterRolesGette // ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs // TODO on non-matching roleref, delete and recreate -func ApplyClusterRoleBinding(ctx context.Context, client rbacclientv1.ClusterRoleBindingsGetter, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) { +func ApplyClusterRoleBinding(ctx context.Context, + client rbacclientv1.ClusterRoleBindingsGetter, + required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) { existing, err := client.ClusterRoleBindings().Get(ctx, required.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { requiredCopy := required.DeepCopy() @@ -248,14 +255,14 @@ func ApplyClusterRoleBinding(ctx context.Context, client rbacclientv1.ClusterRol // Enforce apiGroup fields in roleRefs existingCopy.RoleRef.APIGroup = rbacv1.GroupName for i := range existingCopy.Subjects { - if existingCopy.Subjects[i].Kind == "User" { + if existingCopy.Subjects[i].Kind == RoleRefKindUser { existingCopy.Subjects[i].APIGroup = rbacv1.GroupName } } requiredCopy.RoleRef.APIGroup = rbacv1.GroupName for i := range requiredCopy.Subjects { - if requiredCopy.Subjects[i].Kind == "User" { + if requiredCopy.Subjects[i].Kind == RoleRefKindUser { requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName } } @@ -320,14 
+327,14 @@ func ApplyRoleBinding(ctx context.Context, client rbacclientv1.RoleBindingsGette // Enforce apiGroup fields in roleRefs and subjects existingCopy.RoleRef.APIGroup = rbacv1.GroupName for i := range existingCopy.Subjects { - if existingCopy.Subjects[i].Kind == "User" { + if existingCopy.Subjects[i].Kind == RoleRefKindUser { existingCopy.Subjects[i].APIGroup = rbacv1.GroupName } } requiredCopy.RoleRef.APIGroup = rbacv1.GroupName for i := range requiredCopy.Subjects { - if requiredCopy.Subjects[i].Kind == "User" { + if requiredCopy.Subjects[i].Kind == RoleRefKindUser { requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName } } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go b/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go index 58aee78f8..2dd4c1323 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/utils/probe_helper.go @@ -4,8 +4,9 @@ import ( "fmt" "k8s.io/apimachinery/pkg/types" - "open-cluster-management.io/addon-framework/pkg/agent" workapiv1 "open-cluster-management.io/api/work/v1" + + "open-cluster-management.io/addon-framework/pkg/agent" ) // DeploymentProber is to check the addon status based on status diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go index b986e219c..a40933749 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go @@ -337,9 +337,9 @@ const ( // valid in Hosted mode. ManagedClusterAddOnHostingClusterValidity = "HostingClusterValidity" - // ManagedClusterAddOnUnsupportedConfigurationType is a condition type representing whether the config resources - // are supported. 
- ManagedClusterAddOnUnsupportedConfigurationType = "UnsupportedConfiguration" + // ManagedClusterAddOnRegistrationApplied is a condition type representing whether the registration of + // the addon agent is configured. + ManagedClusterAddOnRegistrationApplied = "RegistrationApplied" ) // the reasons of condition ManagedClusterAddOnConditionAvailable @@ -411,13 +411,52 @@ const ( HostingClusterValidityReasonInvalid = "HostingClusterInvalid" ) -// the reason of condition ManagedClusterAddOnUnsupportedConfigurationType +// the reason of condition ManagedClusterAddOnConditionProgressing const ( - // AddonReasonConfigurationSupported is the reason of condition UnsupportedConfiguration indicating the configuration - // in clusterManagementAddon is supported. - AddonReasonConfigurationSupported = "ConfigurationSupported" + // ProgressingReasonInstalling is the reason of condition Progressing indicating the addon configuration is + // installing. + ProgressingReasonInstalling = "Installing" - // AddonReasonConfigurationUnsupported is the reason of condition UnsupportedConfiguration indicating the configuration - // in clusterManagementAddon is not supported. - AddonReasonConfigurationUnsupported = "ConfigurationUnsupported" + // ProgressingReasonInstallSucceed is the reason of condition Progressing indicating the addon configuration is + // installed successfully. + ProgressingReasonInstallSucceed = "InstallSucceed" + + // ProgressingReasonInstallFailed is the reason of condition Progressing indicating the addon configuration is + // installed failed. + ProgressingReasonInstallFailed = "InstallFailed" + + // ProgressingReasonUpgrading is the reason of condition Progressing indicating the addon configuration is + // upgrading. + ProgressingReasonUpgrading = "Upgrading" + + // ProgressingReasonUpgradeSucceed is the reason of condition Progressing indicating the addon configuration is + // upgraded successfully. 
+ ProgressingReasonUpgradeSucceed = "UpgradeSucceed" + + // ProgressingReasonUpgradeFailed is the reason of condition Progressing indicating the addon configuration is + // upgraded failed. + ProgressingReasonUpgradeFailed = "UpgradeFailed" + + // ProgressingReasonWaitingForCanary is the reason of condition Progressing indicating the addon configuration + // upgrade is pending and waiting for canary is done. + ProgressingReasonWaitingForCanary = "WaitingForCanary" + + // ProgressingReasonConfigurationUnsupported is the reason of condition Progressing indicating the addon configuration + // is not supported. + ProgressingReasonConfigurationUnsupported = "ConfigurationUnsupported" +) + +// the reasons of condition ManagedClusterAddOnRegistrationApplied +const ( + // RegistrationAppliedNilRegistration is the reason of condition RegistrationApplied indicating that there is no + // registration option. + RegistrationAppliedNilRegistration = "NilRegistration" + + // RegistrationAppliedSetPermissionFailed is the reason of condition RegistrationApplied indicating that it is + // failed to set up rbac for the addon agent. + RegistrationAppliedSetPermissionFailed = "SetPermissionFailed" + + // RegistrationAppliedSetPermissionApplied is the reason of condition RegistrationApplied indicating that it is + // successful to set up rbac for the addon agent. 
+ RegistrationAppliedSetPermissionApplied = "SetPermissionApplied" ) diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workapplier/workapplier.go b/vendor/open-cluster-management.io/api/utils/work/v1/workapplier/workapplier.go index a15d2016c..817110f67 100644 --- a/vendor/open-cluster-management.io/api/utils/work/v1/workapplier/workapplier.go +++ b/vendor/open-cluster-management.io/api/utils/work/v1/workapplier/workapplier.go @@ -5,6 +5,9 @@ import ( "encoding/json" "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" @@ -46,15 +49,20 @@ func NewWorkApplierWithTypedClient(workClient workv1client.Interface, func (w *WorkApplier) Apply(ctx context.Context, work *workapiv1.ManifestWork) (*workapiv1.ManifestWork, error) { existingWork, err := w.getWork(ctx, work.Namespace, work.Name) existingWork = existingWork.DeepCopy() - if err != nil { - if errors.IsNotFound(err) { - existingWork, err = w.createWork(ctx, work) - if err == nil { - w.cache.updateCache(work, existingWork) - return existingWork, nil - } + if errors.IsNotFound(err) { + existingWork, err = w.createWork(ctx, work) + switch { + case errors.IsAlreadyExists(err): + return work, nil + case err != nil: return nil, err + default: + w.cache.updateCache(work, existingWork) + return existingWork, nil } + } + + if err != nil { return nil, err } @@ -62,11 +70,16 @@ func (w *WorkApplier) Apply(ctx context.Context, work *workapiv1.ManifestWork) ( return existingWork, nil } - if ManifestWorkSpecEqual(work.Spec, existingWork.Spec) { + if ManifestWorkEqual(work, existingWork) { return existingWork, nil } oldData, err := json.Marshal(&workapiv1.ManifestWork{ + ObjectMeta: metav1.ObjectMeta{ + Labels: existingWork.Labels, + Annotations: existingWork.Annotations, + OwnerReferences: existingWork.OwnerReferences, + }, Spec: existingWork.Spec, }) if 
err != nil { @@ -77,6 +90,9 @@ func (w *WorkApplier) Apply(ctx context.Context, work *workapiv1.ManifestWork) ( ObjectMeta: metav1.ObjectMeta{ UID: existingWork.UID, ResourceVersion: existingWork.ResourceVersion, + Labels: work.Labels, + Annotations: work.Annotations, + OwnerReferences: work.OwnerReferences, }, Spec: work.Spec, }) @@ -111,28 +127,60 @@ func (w *WorkApplier) Delete(ctx context.Context, namespace, name string) error return nil } -func manifestsEqual(new, old []workapiv1.Manifest) bool { - if len(new) != len(old) { - return false +func shouldUpdateMap(required, existing map[string]string) bool { + if len(required) > len(existing) { + return true } - - for i := range new { - if !equality.Semantic.DeepEqual(new[i].Raw, old[i].Raw) { - return false + for key, value := range required { + if existing[key] != value { + return true } } - return true + return false } -func ManifestWorkSpecEqual(new, old workapiv1.ManifestWorkSpec) bool { - if !manifestsEqual(new.Workload.Manifests, old.Workload.Manifests) { +func ManifestWorkEqual(new, old *workapiv1.ManifestWork) bool { + mutatedNewWork := mutateWork(new) + mutatedOldWork := mutateWork(old) + if !equality.Semantic.DeepEqual(mutatedNewWork.Spec, mutatedOldWork.Spec) { return false } - if !equality.Semantic.DeepEqual(new.ManifestConfigs, old.ManifestConfigs) { + + if shouldUpdateMap(mutatedNewWork.Annotations, mutatedOldWork.Annotations) { + return false + } + if shouldUpdateMap(mutatedNewWork.Labels, mutatedOldWork.Labels) { return false } - if !equality.Semantic.DeepEqual(new.DeleteOption, old.DeleteOption) { + if !equality.Semantic.DeepEqual(mutatedNewWork.OwnerReferences, mutatedOldWork.OwnerReferences) { return false } return true } + +// mutate work to easy compare works. 
+func mutateWork(work *workapiv1.ManifestWork) *workapiv1.ManifestWork { + mutatedWork := work.DeepCopy() + newManifests := []workapiv1.Manifest{} + for _, manifest := range work.Spec.Workload.Manifests { + unstructuredManifest := &unstructured.Unstructured{} + if err := unstructuredManifest.UnmarshalJSON(manifest.Raw); err != nil { + klog.Errorf("failed to unmarshal work manifest.err: %v", err) + return mutatedWork + } + + // the filed creationTimestamp should be removed before compare since it is added during transition from raw data to runtime object. + unstructured.RemoveNestedField(unstructuredManifest.Object, "metadata", "creationTimestamp") + unstructured.RemoveNestedField(unstructuredManifest.Object, "spec", "template", "metadata", "creationTimestamp") + + newManifestRaw, err := unstructuredManifest.MarshalJSON() + if err != nil { + klog.Errorf("failed to marshal work manifest.err: %v", err) + return mutatedWork + } + newManifests = append(newManifests, workapiv1.Manifest{RawExtension: runtime.RawExtension{Raw: newManifestRaw}}) + } + + mutatedWork.Spec.Workload.Manifests = newManifests + return mutatedWork +} diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go b/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go index daf4df9d1..d186d08ea 100644 --- a/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go +++ b/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go @@ -167,12 +167,11 @@ func (f *internalWorkBuilder) buildManifestWorks(objects []runtime.Object) (appl } // this step to update the existing manifestWorks, update the existing manifests and delete non-existing manifest - for workIndex := 0; workIndex < len(f.existingManifestWorks); workIndex++ { - work := f.existingManifestWorks[workIndex].DeepCopy() - f.setManifestWorkOptions(work) - work.Spec.Workload.Manifests = []workapiv1.Manifest{} - for manifestIndex := 0; manifestIndex < 
len(f.existingManifestWorks[workIndex].Spec.Workload.Manifests); manifestIndex++ { - manifest := f.existingManifestWorks[workIndex].Spec.Workload.Manifests[manifestIndex] + for _, existingWork := range f.existingManifestWorks { + // new a work with init work meta and keep the existing work name. + requiredWork := f.initManifestWorkWithName(existingWork.Name) + + for _, manifest := range existingWork.Spec.Workload.Manifests { key, err := generateManifestKey(manifest) if err != nil { return nil, nil, err @@ -181,14 +180,14 @@ func (f *internalWorkBuilder) buildManifestWorks(objects []runtime.Object) (appl // currently,we have 80% threshold for the size of manifests, update directly. // TODO: need to consider if the size of updated manifests is more then the limit of manifestWork. if _, ok := requiredMapper[key]; ok { - work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, requiredMapper[key]) + requiredWork.Spec.Workload.Manifests = append(requiredWork.Spec.Workload.Manifests, requiredMapper[key]) delete(requiredMapper, key) continue } } updatedWorks = append(updatedWorks, manifestWorkBuffer{ - work: work, - buffer: f.bufferOfManifestWork(work), + work: requiredWork, + buffer: f.bufferOfManifestWork(requiredWork), }) } @@ -271,6 +270,13 @@ func (f *internalWorkBuilder) initManifestWork(index int) *workapiv1.ManifestWor return work } +// init a work with existing name +func (f *internalWorkBuilder) initManifestWorkWithName(workName string) *workapiv1.ManifestWork { + work := f.initManifestWork(0) + work.SetName(workName) + return work +} + func (f *internalWorkBuilder) setManifestWorkOptions(work *workapiv1.ManifestWork) { // currently we set the options to each generated manifestWorks work.Spec.DeleteOption = f.deletionOption