Skip to content

Commit

Permalink
🐛 Wait until the resources are ready to start controller-manager (stol…
Browse files Browse the repository at this point in the history
…ostron#890)

* Wait until the resources are ready to start controller-manager

Signed-off-by: myan <[email protected]>

* fmt

Signed-off-by: Meng Yan <[email protected]>

* add crd controller

Signed-off-by: Meng Yan <[email protected]>

* add global hub controller

Signed-off-by: Meng Yan <[email protected]>

* fix the backup

Signed-off-by: Meng Yan <[email protected]>

* fix backup error

Signed-off-by: Meng Yan <[email protected]>

* skip the middleware test

Signed-off-by: Meng Yan <[email protected]>

* dynamically cache

Signed-off-by: Meng Yan <[email protected]>

* remove dynamic cache controller

Signed-off-by: Meng Yan <[email protected]>

* fix hubofhub test

Signed-off-by: Meng Yan <[email protected]>

* f

Signed-off-by: Meng Yan <[email protected]>

* remove unnecessary code

Signed-off-by: Meng Yan <[email protected]>

---------

Signed-off-by: myan <[email protected]>
Signed-off-by: Meng Yan <[email protected]>
  • Loading branch information
yanmxa authored Apr 28, 2024
1 parent 465862b commit c8c17db
Show file tree
Hide file tree
Showing 26 changed files with 890 additions and 653 deletions.
290 changes: 17 additions & 273 deletions operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,113 +20,23 @@ import (
"context"
"flag"
"os"
"strconv"
"time"

// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
kafkav1beta2 "github.com/RedHatInsights/strimzi-client-go/apis/kafka.strimzi.io/v1beta2"
routev1 "github.com/openshift/api/route/v1"
subv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
operatorsv1 "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/apis/operators/v1"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/spf13/pflag"
agentv1 "github.com/stolostron/klusterlet-addon-controller/pkg/apis/agent/v1"
mchv1 "github.com/stolostron/multiclusterhub-operator/api/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
workv1 "open-cluster-management.io/api/work/v1"
policyv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
chnv1 "open-cluster-management.io/multicloud-operators-channel/pkg/apis/apps/v1"
placementrulesv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1"
appsubv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1"
appsubV1alpha1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1alpha1"
applicationv1beta1 "sigs.k8s.io/application/api/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"

globalhubv1alpha4 "github.com/stolostron/multicluster-global-hub/operator/apis/v1alpha4"
operatorconstants "github.com/stolostron/multicluster-global-hub/operator/pkg/constants"
hubofhubsaddon "github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/addon"
backupcontrollers "github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/backup"
hubofhubscontrollers "github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/hubofhubs"
"github.com/stolostron/multicluster-global-hub/pkg/constants"
commonobjects "github.com/stolostron/multicluster-global-hub/pkg/objects"
"github.com/stolostron/multicluster-global-hub/operator/pkg/config"
"github.com/stolostron/multicluster-global-hub/operator/pkg/controllers/crd"
"github.com/stolostron/multicluster-global-hub/pkg/utils"
)

var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")

labelSelector = labels.SelectorFromSet(
labels.Set{
constants.GlobalHubOwnerLabelKey: constants.GHOperatorOwnerLabelVal,
},
)
namespacePath = "metadata.namespace"
)

// init registers every API group the operator watches or manages into the
// shared runtime scheme so the manager's client and cache can serialize them.
func init() {
	// Core Kubernetes types (Pods, Deployments, RBAC, ...).
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	// OpenShift routes.
	utilruntime.Must(routev1.AddToScheme(scheme))
	// OLM package-server operator types.
	utilruntime.Must(operatorsv1.AddToScheme(scheme))
	// Open Cluster Management cluster APIs (all served versions).
	utilruntime.Must(clusterv1.AddToScheme(scheme))
	utilruntime.Must(clusterv1beta1.AddToScheme(scheme))
	utilruntime.Must(clusterv1beta2.AddToScheme(scheme))
	utilruntime.Must(workv1.AddToScheme(scheme))
	utilruntime.Must(addonv1alpha1.AddToScheme(scheme))
	// The global hub's own CRD types.
	utilruntime.Must(globalhubv1alpha4.AddToScheme(scheme))
	// Application lifecycle (subscriptions, channels, placement rules).
	utilruntime.Must(appsubv1.SchemeBuilder.AddToScheme(scheme))
	utilruntime.Must(appsubV1alpha1.AddToScheme(scheme))
	utilruntime.Must(subv1alpha1.AddToScheme(scheme))
	utilruntime.Must(chnv1.AddToScheme(scheme))
	utilruntime.Must(placementrulesv1.AddToScheme(scheme))
	// Governance policies and the sig-apps Application type.
	utilruntime.Must(policyv1.AddToScheme(scheme))
	utilruntime.Must(applicationv1beta1.AddToScheme(scheme))
	utilruntime.Must(admissionregistrationv1.AddToScheme(scheme))
	// MultiClusterHub and klusterlet addon agent types.
	utilruntime.Must(mchv1.AddToScheme(scheme))
	utilruntime.Must(agentv1.SchemeBuilder.AddToScheme(scheme))
	// Prometheus operator monitoring types (ServiceMonitor).
	utilruntime.Must(promv1.AddToScheme(scheme))
	// CustomResourceDefinition itself (watched for the Kafka CRD, see initCache).
	utilruntime.Must(apiextensionsv1.AddToScheme(scheme))

	// add Kafka scheme
	utilruntime.Must(kafkav1beta2.AddToScheme(scheme))

	// +kubebuilder:scaffold:scheme
}

// operatorConfig carries the process-level settings parsed from command-line
// flags (see parseFlags) and threaded through manager construction.
type operatorConfig struct {
	MetricsAddress        string // bind address for the metrics endpoint
	ProbeAddress          string // bind address for the health probe endpoint — TODO confirm against manager options
	PodNamespace          string // namespace the operator runs in; defaults to utils.GetDefaultNamespace()
	LeaderElection        bool   // whether the controller manager uses leader election
	GlobalResourceEnabled bool   // whether global-resource handling is enabled (passed to the addon and hub reconcilers)
	LogLevel              string // log verbosity forwarded to the managed controllers
}
var setupLog = ctrl.Log.WithName("setup")

func main() {
os.Exit(doMain(ctrl.SetupSignalHandler(), ctrl.GetConfigOrDie()))
Expand All @@ -142,84 +52,21 @@ func doMain(ctx context.Context, cfg *rest.Config) int {
return 1
}

controllerConfigMap, err := kubeClient.CoreV1().ConfigMaps(utils.GetDefaultNamespace()).Get(
ctx, operatorconstants.ControllerConfig, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
setupLog.Error(err, "failed to get controller config")
return 1
}
controllerConfigMap = nil
}

electionConfig, err := getElectionConfig(controllerConfigMap)
err = config.LoadControllerConfig(ctx, kubeClient)
if err != nil {
setupLog.Error(err, "failed to get election config")
setupLog.Error(err, "failed to load controller config")
return 1
}

mgr, err := getManager(cfg, electionConfig, operatorConfig)
mgr, err := getManager(cfg, operatorConfig)
if err != nil {
setupLog.Error(err, "unable to start manager")
return 1
}

// middlewareCfg is shared between all controllers
middlewareCfg := &hubofhubscontrollers.MiddlewareConfig{}

// start addon controller
if err = (&hubofhubsaddon.HoHAddonInstaller{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("addon-reconciler"),
}).SetupWithManager(ctx, mgr); err != nil {
setupLog.Error(err, "unable to create addon reconciler")
return 1
}

addonController, err := hubofhubsaddon.NewHoHAddonController(mgr.GetConfig(), mgr.GetClient(),
electionConfig, middlewareCfg, operatorConfig.GlobalResourceEnabled, controllerConfigMap, operatorConfig.LogLevel)
_, err = crd.AddCRDController(mgr, operatorConfig, kubeClient)
if err != nil {
setupLog.Error(err, "unable to create addon controller")
return 1
}
if err = mgr.Add(addonController); err != nil {
setupLog.Error(err, "unable to add addon controller to manager")
return 1
}

if err = (&hubofhubscontrollers.GlobalHubConditionReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("condition-reconciler"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create GlobalHubStatusReconciler")
return 1
}

r := &hubofhubscontrollers.MulticlusterGlobalHubReconciler{
Manager: mgr,
Client: mgr.GetClient(),
AddonManager: addonController.AddonManager(),
KubeClient: kubeClient,
Scheme: mgr.GetScheme(),
LeaderElection: electionConfig,
Log: ctrl.Log.WithName("global-hub-reconciler"),
MiddlewareConfig: middlewareCfg,
EnableGlobalResource: operatorConfig.GlobalResourceEnabled,
LogLevel: operatorConfig.LogLevel,
}

if err = r.SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create MulticlusterGlobalHubReconciler")
return 1
}

backupController := backupcontrollers.NewBackupReconciler(
mgr,
ctrl.Log.WithName("backup-reconciler"),
)

if err = mgr.Add(backupController); err != nil {
setupLog.Error(err, "unable to bakcup controller to manager")
setupLog.Error(err, "unable to create crd controller")
return 1
}

Expand All @@ -241,8 +88,8 @@ func doMain(ctx context.Context, cfg *rest.Config) int {
return 0
}

func parseFlags() *operatorConfig {
config := &operatorConfig{
func parseFlags() *config.OperatorConfig {
config := &config.OperatorConfig{
PodNamespace: utils.GetDefaultNamespace(),
}

Expand Down Expand Up @@ -271,15 +118,17 @@ func parseFlags() *operatorConfig {
return config
}

func getManager(restConfig *rest.Config, electionConfig *commonobjects.LeaderElectionConfig,
operatorConfig *operatorConfig,
) (ctrl.Manager, error) {
func getManager(restConfig *rest.Config, operatorConfig *config.OperatorConfig) (ctrl.Manager, error) {
electionConfig, err := config.GetElectionConfig()
if err != nil {
return nil, err
}
leaseDuration := time.Duration(electionConfig.LeaseDuration) * time.Second
renewDeadline := time.Duration(electionConfig.RenewDeadline) * time.Second
retryPeriod := time.Duration(electionConfig.RetryPeriod) * time.Second

mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
Scheme: config.GetRuntimeScheme(),
Metrics: metricsserver.Options{
BindAddress: operatorConfig.MetricsAddress,
},
Expand All @@ -290,113 +139,8 @@ func getManager(restConfig *rest.Config, electionConfig *commonobjects.LeaderEle
LeaseDuration: &leaseDuration,
RenewDeadline: &renewDeadline,
RetryPeriod: &retryPeriod,
NewCache: initCache,
NewCache: config.InitCache,
})

return mgr, err
}

// getElectionConfig returns the leader-election timings for the controller
// manager. It starts from built-in defaults and overrides each value that is
// present in the given ConfigMap; a nil ConfigMap yields the defaults.
// It returns an error when an override is present but not a valid integer.
func getElectionConfig(configMap *corev1.ConfigMap) (*commonobjects.LeaderElectionConfig, error) {
	// Defaults (seconds); overridden individually below.
	config := &commonobjects.LeaderElectionConfig{
		LeaseDuration: 137,
		RenewDeadline: 107,
		RetryPeriod:   26,
	}
	if configMap == nil {
		return config, nil
	}

	// Map each ConfigMap key to the field it overrides, so every key is
	// parsed and validated the same way (was three copy-pasted blocks).
	overrides := map[string]*int{
		"leaseDuration": &config.LeaseDuration,
		"renewDeadline": &config.RenewDeadline,
		"retryPeriod":   &config.RetryPeriod,
	}
	for key, field := range overrides {
		val, ok := configMap.Data[key]
		if !ok {
			continue
		}
		seconds, err := strconv.Atoi(val)
		if err != nil {
			return nil, err
		}
		*field = seconds
	}

	return config, nil
}

// initCache constructs the controller-runtime cache with per-type selectors,
// so the operator only caches the objects it needs: its own namespace for
// config-style resources, operator-owned objects (by owner label) for
// workload resources, and a handful of specially-filtered cluster-scoped
// types.
func initCache(config *rest.Config, cacheOpts cache.Options) (cache.Cache, error) {
	// Resources restricted to the operator's own namespace.
	// NOTE(review): assumes GetDefaultNamespace() is deterministic for the
	// process lifetime, so one selector can be shared across entries.
	ownNamespace := fields.OneTermEqualSelector(namespacePath, utils.GetDefaultNamespace())

	byObject := map[client.Object]cache.ByObject{
		&corev1.Secret{}:                {Field: ownNamespace},
		&corev1.ConfigMap{}:             {Field: ownNamespace},
		&corev1.PersistentVolumeClaim{}: {Field: ownNamespace},

		// Cached cluster-wide without filtering.
		&corev1.Namespace{}:         {},
		&subv1alpha1.Subscription{}: {},
		&mchv1.MultiClusterHub{}:    {},

		// Only OpenShift-vendor managed clusters.
		&clusterv1.ManagedCluster{}: {
			Label: labels.SelectorFromSet(labels.Set{"vendor": "OpenShift"}),
		},

		// Only the Strimzi Kafka CRD is of interest.
		&apiextensionsv1.CustomResourceDefinition{}: {
			Field: fields.SelectorFromSet(fields.Set{"metadata.name": "kafkas.kafka.strimzi.io"}),
		},
	}

	// Everything below is cached only when carrying the global hub
	// operator's owner label (labelSelector).
	for _, owned := range []client.Object{
		&corev1.ServiceAccount{},
		&corev1.Service{},
		&appsv1.Deployment{},
		&appsv1.StatefulSet{},
		&rbacv1.Role{},
		&rbacv1.RoleBinding{},
		&rbacv1.ClusterRole{},
		&rbacv1.ClusterRoleBinding{},
		&routev1.Route{},
		&workv1.ManifestWork{},
		&addonv1alpha1.ClusterManagementAddOn{},
		&addonv1alpha1.ManagedClusterAddOn{},
		&admissionregistrationv1.MutatingWebhookConfiguration{},
		&promv1.ServiceMonitor{},
	} {
		byObject[owned] = cache.ByObject{Label: labelSelector}
	}

	cacheOpts.ByObject = byObject
	return cache.New(config, cacheOpts)
}
10 changes: 7 additions & 3 deletions operator/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/envtest"

"github.com/stolostron/multicluster-global-hub/operator/pkg/config"
operatorconstants "github.com/stolostron/multicluster-global-hub/operator/pkg/constants"
"github.com/stolostron/multicluster-global-hub/pkg/utils"
)
Expand Down Expand Up @@ -99,7 +100,7 @@ var _ = Describe("Start Operator Test", Ordered, func() {
operatorConfig := parseFlags()
Expect(operatorConfig.LeaderElection).To(BeTrue())

electionConfig, err := getElectionConfig(nil)
electionConfig, err := config.GetElectionConfig()
Expect(err).NotTo(HaveOccurred())

Expect(electionConfig.LeaseDuration).To(Equal(137))
Expand Down Expand Up @@ -128,7 +129,7 @@ var _ = Describe("Start Operator Test", Ordered, func() {

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
configMap, err := kubeClient.CoreV1().ConfigMaps(
_, err := kubeClient.CoreV1().ConfigMaps(
utils.GetDefaultNamespace()).Create(ctx,
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -138,7 +139,10 @@ var _ = Describe("Start Operator Test", Ordered, func() {
Data: map[string]string{"leaseDuration": "10", "renewDeadline": "8", "retryPeriod": "2"},
}, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred())
electionConfig, err := getElectionConfig(configMap)
err = config.LoadControllerConfig(ctx, kubeClient)
Expect(err).NotTo(HaveOccurred())

electionConfig, err := config.GetElectionConfig()
Expect(err).NotTo(HaveOccurred())

Expect(electionConfig.LeaseDuration).To(Equal(10))
Expand Down
Loading

0 comments on commit c8c17db

Please sign in to comment.