diff --git a/Makefile b/Makefile index 534572a5..63e51ff7 100644 --- a/Makefile +++ b/Makefile @@ -75,6 +75,7 @@ GINKGO_ARGS ?= SKIP_RESOURCE_CLEANUP ?= false USE_EXISTING_CLUSTER ?= false ISOLATED_MODE ?= false +CNI ?= calico # alternatively: kindnet GINKGO_NOCOLOR ?= false GINKGO_LABEL_FILTER ?= short || full GINKGO_TESTS ?= $(ROOT_DIR)/$(TEST_DIR)/e2e/suites/... @@ -484,6 +485,9 @@ $(RELEASE_DIR): $(CHART_RELEASE_DIR): mkdir -p $(CHART_RELEASE_DIR)/templates +clean-chart-release-dir: + rm -rf $(CHART_RELEASE_DIR) + $(CHART_PACKAGE_DIR): mkdir -p $(CHART_PACKAGE_DIR) @@ -492,7 +496,7 @@ release: clean-release $(RELEASE_DIR) ## Builds and push container images using $(MAKE) release-chart .PHONY: build-chart -build-chart: $(HELM) $(KUSTOMIZE) $(RELEASE_DIR) $(CHART_RELEASE_DIR) $(CHART_PACKAGE_DIR) ## Builds the chart to publish with a release +build-chart: $(HELM) $(KUSTOMIZE) clean-chart-release-dir $(RELEASE_DIR) $(CHART_RELEASE_DIR) $(CHART_PACKAGE_DIR) ## Builds the chart to publish with a release $(KUSTOMIZE) build ./config/chart > $(CHART_DIR)/templates/rancher-turtles-components.yaml cp -rf $(CHART_DIR)/* $(CHART_RELEASE_DIR) sed -i'' -e 's@image: .*@image: '"$(CONTROLLER_IMG)"'@' $(CHART_RELEASE_DIR)/values.yaml @@ -519,7 +523,8 @@ test-e2e: $(GINKGO) $(HELM) $(CLUSTERCTL) kubectl e2e-image ## Run the end-to-en -e2e.chart-path=$(ROOT_DIR)/$(CHART_RELEASE_DIR) \ -e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) \ - -e2e.isolated-mode=$(ISOLATED_MODE) + -e2e.isolated-mode=$(ISOLATED_MODE) \ + -e2e.cni=$(CNI) .PHONY: e2e-image e2e-image: ## Build the image for e2e tests diff --git a/internal/controllers/helpers.go b/internal/controllers/helpers.go index 93828689..4415c274 100644 --- a/internal/controllers/helpers.go +++ b/internal/controllers/helpers.go @@ -18,6 +18,8 @@ package controllers import ( "bufio" + "bytes" + "cmp" "context" "crypto/tls" "errors" @@ -26,9 +28,12 @@ import ( "net/http" "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" yamlDecoder "k8s.io/apimachinery/pkg/util/yaml" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -50,6 +55,7 @@ const ( capiClusterOwner = "cluster-api.cattle.io/capi-cluster-owner" capiClusterOwnerNamespace = "cluster-api.cattle.io/capi-cluster-owner-ns" + deploymentKind = "Deployment" defaultRequeueDuration = 1 * time.Minute ) @@ -178,14 +184,37 @@ func createImportManifest(ctx context.Context, remoteClient client.Client, in io return nil } -func createRawManifest(ctx context.Context, remoteClient client.Client, bytes []byte) error { - items, err := utilyaml.ToUnstructured(bytes) - if err != nil { - return fmt.Errorf("error unmarshalling bytes or empty object passed: %w", err) - } +func createRawManifest(ctx context.Context, remoteClient client.Client, data []byte) error { + log := log.FromContext(ctx) + decoder := utilyaml.NewYAMLDecoder(io.NopCloser(bytes.NewReader(data))) - for _, obj := range items { - if err := createObject(ctx, remoteClient, obj.DeepCopy()); err != nil { + for { + u := &unstructured.Unstructured{} + + _, gvk, err := decoder.Decode(nil, u) + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } + + if gvk.Kind == deploymentKind { + deploy := &appsv1.Deployment{} + if err := 
runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, deploy); err != nil { + log.Error(err, "failed to decode agent deployment") + return err + } + + setDeploymentAffinity(deploy) + + if err := createObject(ctx, remoteClient, deploy); err != nil { + return err + } + + continue + } + + if err := createObject(ctx, remoteClient, u.DeepCopy()); err != nil { return err } } @@ -193,6 +222,33 @@ func createRawManifest(ctx context.Context, remoteClient client.Client, bytes [] return nil } +func setDeploymentAffinity(deploy *appsv1.Deployment) { + affinity := cmp.Or(deploy.Spec.Template.Spec.Affinity, &corev1.Affinity{}) + nodeAffinity := cmp.Or(affinity.NodeAffinity, &corev1.NodeAffinity{}) + preference := corev1.PreferredSchedulingTerm{ + Weight: 100, + Preference: corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{{ + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.NodeSelectorOpExists, + }}, + }, + } + nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, preference) + preference.Preference.MatchExpressions = []corev1.NodeSelectorRequirement{{ + Key: "node-role.kubernetes.io/controlplane", + Operator: corev1.NodeSelectorOpExists, + }} + nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, preference) + preference.Preference.MatchExpressions = []corev1.NodeSelectorRequirement{{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.NodeSelectorOpExists, + }} + nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, preference) + affinity.NodeAffinity = nodeAffinity + deploy.Spec.Template.Spec.Affinity = affinity +} + func createObject(ctx context.Context, c client.Client, obj client.Object) error { log := log.FromContext(ctx) gvk := obj.GetObjectKind().GroupVersionKind() diff --git a/internal/controllers/import_controller.go b/internal/controllers/import_controller.go index 3594e8f6..af76e5fa 100644 --- a/internal/controllers/import_controller.go +++ b/internal/controllers/import_controller.go @@ -20,7 +20,9 @@ import ( "context" "fmt" "strings" + "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -49,6 +51,8 @@ import ( turtlespredicates "github.com/rancher/turtles/util/predicates" ) +const fieldOwner = "rancher-turtles" + // CAPIImportReconciler represents a reconciler for importing CAPI clusters in Rancher. 
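A note on the helpers.go hunk above: setDeploymentAffinity appends three soft (PreferredDuringScheduling) node-affinity terms so the cattle-cluster-agent pod prefers control-plane nodes whichever role label the distribution uses; cmp.Or substitutes a fresh empty struct when Affinity or NodeAffinity is nil, and because append copies the PreferredSchedulingTerm by value, mutating the local preference between appends is safe. The same three terms could be built in a loop, as in this sketch (illustrative only, not part of the change; controlPlanePreferences is a hypothetical name):

// Illustrative sketch, not part of this diff: the three preferred terms that
// setDeploymentAffinity appends, built from a list of control-plane labels.
package sketch

import corev1 "k8s.io/api/core/v1"

func controlPlanePreferences() []corev1.PreferredSchedulingTerm {
	// One soft preference per control-plane role label seen across
	// Kubernetes versions and distributions, all with equal weight.
	keys := []string{
		"node-role.kubernetes.io/control-plane",
		"node-role.kubernetes.io/controlplane",
		"node-role.kubernetes.io/master",
	}
	terms := make([]corev1.PreferredSchedulingTerm, 0, len(keys))
	for _, key := range keys {
		terms = append(terms, corev1.PreferredSchedulingTerm{
			Weight: 100,
			Preference: corev1.NodeSelectorTerm{
				MatchExpressions: []corev1.NodeSelectorRequirement{{
					Key:      key,
					Operator: corev1.NodeSelectorOpExists,
				}},
			},
		})
	}
	return terms
}
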
type CAPIImportReconciler struct { Client client.Client @@ -241,11 +245,50 @@ func (r *CAPIImportReconciler) reconcileNormal(ctx context.Context, capiCluster log.Info("found cluster name", "name", rancherCluster.Status.ClusterName) - if rancherCluster.Status.AgentDeployed { - log.Info("agent already deployed, no action needed") + if rancherCluster.Status.Ready { + log.Info("cluster is ready, no action needed") return ctrl.Result{}, nil } + // We have to ensure the agent deployment has correct nodeAffinity settings at all times + remoteClient, err := r.remoteClientGetter(ctx, capiCluster.Name, r.Client, client.ObjectKeyFromObject(capiCluster)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("getting remote cluster client: %w", err) + } + + if rancherCluster.Status.AgentDeployed { + log.Info("updating agent node affinity settings") + + agent := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: "cattle-cluster-agent", + Namespace: "cattle-system", + }} + + if err := remoteClient.Get(ctx, client.ObjectKeyFromObject(agent), agent); err != nil { + log.Error(err, "unable to get existing agent deployment") + return ctrl.Result{}, err + } + + setDeploymentAffinity(agent) + agent.SetManagedFields(nil) + agent.TypeMeta = metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: deploymentKind, + } + + if err := remoteClient.Patch(ctx, agent, client.Apply, []client.PatchOption{ + client.ForceOwnership, + client.FieldOwner(fieldOwner), + }...); err != nil { + log.Error(err, "unable to update existing agent deployment") + return ctrl.Result{}, err + } + + // During the provisioning after registration the initial deployment gets + // updated by the rancher. We must not miss it. + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + // get the registration manifest manifest, err := getClusterRegistrationManifest(ctx, rancherCluster.Status.ClusterName, capiCluster.Namespace, r.RancherClient, r.InsecureSkipVerify) if err != nil { @@ -259,11 +302,6 @@ func (r *CAPIImportReconciler) reconcileNormal(ctx context.Context, capiCluster log.Info("Creating import manifest") - remoteClient, err := r.remoteClientGetter(ctx, capiCluster.Name, r.Client, client.ObjectKeyFromObject(capiCluster)) - if err != nil { - return ctrl.Result{}, fmt.Errorf("getting remote cluster client: %w", err) - } - if err := createImportManifest(ctx, remoteClient, strings.NewReader(manifest)); err != nil { return ctrl.Result{}, fmt.Errorf("creating import manifest: %w", err) } diff --git a/internal/controllers/import_controller_test.go b/internal/controllers/import_controller_test.go index 5b6db481..e46e160a 100644 --- a/internal/controllers/import_controller_test.go +++ b/internal/controllers/import_controller_test.go @@ -25,11 +25,12 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/rancher/turtles/internal/controllers/testdata" - managementv3 "github.com/rancher/turtles/internal/rancher/management/v3" - provisioningv1 "github.com/rancher/turtles/internal/rancher/provisioning/v1" - "github.com/rancher/turtles/internal/test" - turtlesnaming "github.com/rancher/turtles/util/naming" + "github.com/rancher-sandbox/rancher-turtles/internal/controllers/testdata" + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + provisioningv1 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/provisioning/v1" + "github.com/rancher-sandbox/rancher-turtles/internal/test" + turtlesnaming "github.com/rancher-sandbox/rancher-turtles/util/naming" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -225,6 +226,50 @@ var _ = Describe("reconcile CAPI Cluster", func() { unstructuredObj.SetUnstructuredContent(u) unstructuredObj.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + if unstructuredObj.GroupVersionKind().Kind == "Deployment" { + dep := &appsv1.Deployment{} + g.Eventually(testEnv.GetAs(unstructuredObj, dep)).ShouldNot(BeNil()) + affinity := dep.Spec.Template.Spec.Affinity.NodeAffinity + g.Expect(affinity).ToNot(BeNil()) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.NodeSelectorOpExists, + })), + )) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/controlplane", + Operator: corev1.NodeSelectorOpExists, + })), + )) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.NodeSelectorOpExists, + })), + )) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + })), + )) + } + g.Expect(cl.Get(ctx, client.ObjectKey{ Namespace: unstructuredObj.GetNamespace(), Name: unstructuredObj.GetName(), diff --git a/internal/controllers/import_controller_v3.go b/internal/controllers/import_controller_v3.go index dd7455fe..2dc080b6 100644 --- a/internal/controllers/import_controller_v3.go +++ b/internal/controllers/import_controller_v3.go @@ -20,7 +20,9 @@ import ( "context" "fmt" "strings" + "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -274,11 +276,50 @@ func (r *CAPIImportManagementV3Reconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, err } - if conditions.IsTrue(rancherCluster, managementv3.ClusterConditionAgentDeployed) { - log.Info("agent already deployed, no action needed") + if conditions.IsTrue(rancherCluster, managementv3.ClusterConditionReady) { + log.Info("cluster is ready, no action needed") return ctrl.Result{}, nil } + // We have to ensure the agent deployment 
has correct nodeAffinity settings at all times + remoteClient, err := r.remoteClientGetter(ctx, capiCluster.Name, r.Client, client.ObjectKeyFromObject(capiCluster)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("getting remote cluster client: %w", err) + } + + if conditions.IsTrue(rancherCluster, managementv3.ClusterConditionAgentDeployed) { + log.Info("updating agent node affinity settings") + + agent := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: "cattle-cluster-agent", + Namespace: "cattle-system", + }} + + if err := remoteClient.Get(ctx, client.ObjectKeyFromObject(agent), agent); err != nil { + log.Error(err, "unable to get existing agent deployment") + return ctrl.Result{}, err + } + + setDeploymentAffinity(agent) + agent.SetManagedFields(nil) + agent.TypeMeta = metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: deploymentKind, + } + + if err := remoteClient.Patch(ctx, agent, client.Apply, []client.PatchOption{ + client.ForceOwnership, + client.FieldOwner(fieldOwner), + }...); err != nil { + log.Error(err, "unable to update existing agent deployment") + return ctrl.Result{}, err + } + + // During the provisioning after registration the initial deployment gets + // updated by the rancher. We must not miss it. + return ctrl.Result{RequeueAfter: 10 * time.Second}, nil + } + // get the registration manifest manifest, err := getClusterRegistrationManifest(ctx, rancherCluster.Name, rancherCluster.Name, r.RancherClient, r.InsecureSkipVerify) if err != nil { @@ -292,11 +333,6 @@ func (r *CAPIImportManagementV3Reconciler) reconcileNormal(ctx context.Context, log.Info("Creating import manifest") - remoteClient, err := r.remoteClientGetter(ctx, capiCluster.Name, r.Client, client.ObjectKeyFromObject(capiCluster)) - if err != nil { - return ctrl.Result{}, fmt.Errorf("getting remote cluster client: %w", err) - } - if err := createImportManifest(ctx, remoteClient, strings.NewReader(manifest)); err != nil { return ctrl.Result{}, fmt.Errorf("creating import manifest: %w", err) } diff --git a/internal/controllers/import_controller_v3_test.go b/internal/controllers/import_controller_v3_test.go index b55dc26f..561166ec 100644 --- a/internal/controllers/import_controller_v3_test.go +++ b/internal/controllers/import_controller_v3_test.go @@ -25,9 +25,10 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/rancher/turtles/internal/controllers/testdata" - managementv3 "github.com/rancher/turtles/internal/rancher/management/v3" - "github.com/rancher/turtles/internal/test" + "github.com/rancher-sandbox/rancher-turtles/internal/controllers/testdata" + managementv3 "github.com/rancher-sandbox/rancher-turtles/internal/rancher/management/v3" + "github.com/rancher-sandbox/rancher-turtles/internal/test" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -261,6 +262,50 @@ var _ = Describe("reconcile CAPI Cluster", func() { unstructuredObj.SetUnstructuredContent(u) unstructuredObj.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + if unstructuredObj.GroupVersionKind().Kind == "Deployment" { + dep := &appsv1.Deployment{} + g.Eventually(testEnv.GetAs(unstructuredObj, dep)).ShouldNot(BeNil()) + affinity := dep.Spec.Template.Spec.Affinity.NodeAffinity + g.Expect(affinity).ToNot(BeNil()) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.NodeSelectorOpExists, + })), + )) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/controlplane", + Operator: corev1.NodeSelectorOpExists, + })), + )) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.NodeSelectorOpExists, + })), + )) + g.Expect(affinity.PreferredDuringSchedulingIgnoredDuringExecution).To( + ContainElement( + HaveField( + "Preference.MatchExpressions", + HaveExactElements(corev1.NodeSelectorRequirement{ + Key: "node-role.kubernetes.io/master", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + })), + )) + } + g.Expect(cl.Get(ctx, client.ObjectKey{ Namespace: unstructuredObj.GetNamespace(), Name: unstructuredObj.GetName(), diff --git a/test/e2e/const.go b/test/e2e/const.go index 4616ab5c..ca2d0031 100644 --- a/test/e2e/const.go +++ b/test/e2e/const.go @@ -74,6 +74,12 @@ var ( //go:embed data/cluster-templates/vsphere-kubeadm.yaml CAPIvSphereKubeadm []byte + + //go:embed data/cluster-templates/cni-calico.yaml + CalicoCNI []byte + + //go:embed data/cluster-templates/cni-kindnet.yaml + KindnetCNI []byte ) const ( diff --git a/test/e2e/data/capi-operator/capa-variables.yaml b/test/e2e/data/capi-operator/capa-variables.yaml index 08380e56..2a88f6ea 100644 --- a/test/e2e/data/capi-operator/capa-variables.yaml +++ b/test/e2e/data/capi-operator/capa-variables.yaml @@ -3,12 +3,16 @@ apiVersion: v1 kind: Namespace metadata: name: capa-system + annotations: + "helm.sh/resource-policy": keep --- apiVersion: v1 kind: Secret metadata: name: full-variables namespace: capa-system + annotations: + "helm.sh/resource-policy": keep type: Opaque stringData: AWS_B64ENCODED_CREDENTIALS: "{{ .AWSEncodedCredentials }}" diff --git a/test/e2e/data/capi-operator/capi-providers.yaml b/test/e2e/data/capi-operator/capi-providers.yaml index 01502f34..a7be8da3 100644 --- a/test/e2e/data/capi-operator/capi-providers.yaml +++ 
b/test/e2e/data/capi-operator/capi-providers.yaml @@ -3,12 +3,16 @@ apiVersion: v1 kind: Namespace metadata: name: capd-system + annotations: + "helm.sh/resource-policy": keep --- apiVersion: turtles-capi.cattle.io/v1alpha1 kind: CAPIProvider metadata: name: docker namespace: capd-system + annotations: + "helm.sh/resource-policy": keep spec: name: docker type: infrastructure @@ -19,12 +23,16 @@ apiVersion: v1 kind: Namespace metadata: name: capi-kubeadm-bootstrap-system + annotations: + "helm.sh/resource-policy": keep --- apiVersion: turtles-capi.cattle.io/v1alpha1 kind: CAPIProvider metadata: name: kubeadm-bootstrap namespace: capi-kubeadm-bootstrap-system + annotations: + "helm.sh/resource-policy": keep spec: name: kubeadm type: bootstrap @@ -36,15 +44,30 @@ apiVersion: v1 kind: Namespace metadata: name: capi-kubeadm-control-plane-system + annotations: + "helm.sh/resource-policy": keep --- apiVersion: turtles-capi.cattle.io/v1alpha1 kind: CAPIProvider metadata: name: kubeadm-control-plane namespace: capi-kubeadm-control-plane-system + annotations: + "helm.sh/resource-policy": keep spec: name: kubeadm type: controlPlane version: v1.4.6 configSecret: name: variables +--- +apiVersion: turtles-capi.cattle.io/v1alpha1 +kind: CAPIProvider +metadata: + name: helm + namespace: capd-system + annotations: + "helm.sh/resource-policy": keep +spec: + type: addon + name: helm diff --git a/test/e2e/data/capi-operator/capv-provider.yaml b/test/e2e/data/capi-operator/capv-provider.yaml index de9a09ef..2756bbcb 100644 --- a/test/e2e/data/capi-operator/capv-provider.yaml +++ b/test/e2e/data/capi-operator/capv-provider.yaml @@ -4,6 +4,8 @@ kind: CAPIProvider metadata: name: vsphere namespace: capv-system + annotations: + "helm.sh/resource-policy": keep spec: name: vsphere type: infrastructure diff --git a/test/e2e/data/capi-operator/capv-variables.yaml b/test/e2e/data/capi-operator/capv-variables.yaml index 3fec24fa..adff4b05 100644 --- a/test/e2e/data/capi-operator/capv-variables.yaml +++ b/test/e2e/data/capi-operator/capv-variables.yaml @@ -3,12 +3,16 @@ apiVersion: v1 kind: Namespace metadata: name: capv-system + annotations: + "helm.sh/resource-policy": keep --- apiVersion: v1 kind: Secret metadata: name: vsphere-variables namespace: capv-system + annotations: + "helm.sh/resource-policy": keep type: Opaque stringData: VSPHERE_USERNAME: "${VSPHERE_USERNAME}" diff --git a/test/e2e/data/capi-operator/capz-identity-secret.yaml b/test/e2e/data/capi-operator/capz-identity-secret.yaml index 0e498bb0..3355917a 100644 --- a/test/e2e/data/capi-operator/capz-identity-secret.yaml +++ b/test/e2e/data/capi-operator/capz-identity-secret.yaml @@ -3,6 +3,8 @@ apiVersion: v1 kind: Namespace metadata: name: capz-system + annotations: + "helm.sh/resource-policy": keep --- apiVersion: v1 stringData: @@ -11,4 +13,6 @@ kind: Secret metadata: name: cluster-identity-secret namespace: capz-system + annotations: + "helm.sh/resource-policy": keep type: Opaque \ No newline at end of file diff --git a/test/e2e/data/capi-operator/full-providers.yaml b/test/e2e/data/capi-operator/full-providers.yaml index a6b3bc72..c4694455 100644 --- a/test/e2e/data/capi-operator/full-providers.yaml +++ b/test/e2e/data/capi-operator/full-providers.yaml @@ -4,6 +4,8 @@ kind: CAPIProvider metadata: name: aws namespace: capa-system + annotations: + "helm.sh/resource-policy": keep spec: type: infrastructure name: aws @@ -20,6 +22,8 @@ kind: CAPIProvider metadata: name: azure namespace: capz-system + annotations: + "helm.sh/resource-policy": keep spec: 
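  # Reviewer note, not part of this change: the "helm.sh/resource-policy": keep
  # annotations added throughout these e2e manifests tell Helm not to delete the
  # annotated resource on uninstall or upgrade, presumably so the provider
  # namespaces, credential secrets and CAPIProvider objects survive chart
  # re-installs during a test run.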
type: infrastructure name: azure diff --git a/test/e2e/data/cluster-templates/cni-calico.yaml b/test/e2e/data/cluster-templates/cni-calico.yaml new file mode 100644 index 00000000..4b68f1bc --- /dev/null +++ b/test/e2e/data/cluster-templates/cni-calico.yaml @@ -0,0 +1,28 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cluster-calico-cni + annotations: + "helm.sh/resource-policy": keep +spec: + clusterSelector: + matchLabels: + calico: "true" + releaseName: calico + repoURL: https://docs.tigera.io/calico/charts + chartName: tigera-operator + namespace: kube-system + valuesTemplate: | + installation: + cni: + type: Calico + ipam: + type: HostLocal + calicoNetwork: + bgp: Disabled + mtu: 1350 + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: None + natOutgoing: Enabled + nodeSelector: all(){{end}} diff --git a/test/e2e/data/cluster-templates/cni-kindnet.yaml b/test/e2e/data/cluster-templates/cni-kindnet.yaml new file mode 100644 index 00000000..8a85d775 --- /dev/null +++ b/test/e2e/data/cluster-templates/cni-kindnet.yaml @@ -0,0 +1,137 @@ +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: ${CLUSTER_NAME}-crs-0 + annotations: + "helm.sh/resource-policy": keep +spec: + clusterSelector: + matchLabels: + cni: ${CLUSTER_NAME}-crs-0 + resources: + - kind: ConfigMap + name: cni-${CLUSTER_NAME}-crs-0 + strategy: ApplyOnce +--- +apiVersion: v1 +data: + kindnet.yaml: | + # kindnetd networking manifest + --- + kind: ClusterRole + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: kindnet + rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - patch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: kindnet + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kindnet + subjects: + - kind: ServiceAccount + name: kindnet + namespace: kube-system + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: kindnet + namespace: kube-system + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: kindnet + namespace: kube-system + labels: + tier: node + app: kindnet + k8s-app: kindnet + spec: + selector: + matchLabels: + app: kindnet + template: + metadata: + labels: + tier: node + app: kindnet + k8s-app: kindnet + spec: + hostNetwork: true + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: kindnet + containers: + - name: kindnet-cni + image: kindest/kindnetd:v20230330-48f316cd + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_SUBNET + value: '192.168.0.0/16' + volumeMounts: + - name: cni-cfg + mountPath: /etc/cni/net.d + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: lib-modules + mountPath: /lib/modules + readOnly: true + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: false + capabilities: + add: ["NET_RAW", "NET_ADMIN"] + volumes: + - name: cni-bin + hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + - name: cni-cfg + hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: lib-modules + hostPath: + path: /lib/modules +kind: 
ConfigMap +metadata: + name: cni-${CLUSTER_NAME}-crs-0 diff --git a/test/e2e/data/cluster-templates/docker-kubeadm.yaml b/test/e2e/data/cluster-templates/docker-kubeadm.yaml index 890e1a79..76a4c114 100644 --- a/test/e2e/data/cluster-templates/docker-kubeadm.yaml +++ b/test/e2e/data/cluster-templates/docker-kubeadm.yaml @@ -12,22 +12,18 @@ spec: controlPlane: true fd3: controlPlane: true - fd4: - controlPlane: false - fd5: - controlPlane: false - fd6: - controlPlane: false - fd7: - controlPlane: false - fd8: - controlPlane: false + fd4: {} + fd5: {} + fd6: {} + fd7: {} + fd8: {} --- apiVersion: cluster.x-k8s.io/v1beta1 kind: Cluster metadata: labels: cni: ${CLUSTER_NAME}-crs-0 + calico: "true" name: ${CLUSTER_NAME} spec: clusterNetwork: @@ -145,139 +141,3 @@ spec: kind: DockerMachineTemplate name: ${CLUSTER_NAME}-md-0 version: ${KUBERNETES_VERSION} ---- -apiVersion: addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - name: ${CLUSTER_NAME}-crs-0 -spec: - clusterSelector: - matchLabels: - cni: ${CLUSTER_NAME}-crs-0 - resources: - - kind: ConfigMap - name: cni-${CLUSTER_NAME}-crs-0 - strategy: ApplyOnce ---- -apiVersion: v1 -data: - kindnet.yaml: | - # kindnetd networking manifest - --- - kind: ClusterRole - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: kindnet - rules: - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - patch - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - --- - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: kindnet - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kindnet - subjects: - - kind: ServiceAccount - name: kindnet - namespace: kube-system - --- - apiVersion: v1 - kind: ServiceAccount - metadata: - name: kindnet - namespace: kube-system - --- - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: kindnet - namespace: kube-system - labels: - tier: node - app: kindnet - k8s-app: kindnet - spec: - selector: - matchLabels: - app: kindnet - template: - metadata: - labels: - tier: node - app: kindnet - k8s-app: kindnet - spec: - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - serviceAccountName: kindnet - containers: - - name: kindnet-cni - image: kindest/kindnetd:v20230330-48f316cd - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: POD_SUBNET - value: '192.168.0.0/16' - volumeMounts: - - name: cni-cfg - mountPath: /etc/cni/net.d - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - - name: lib-modules - mountPath: /lib/modules - readOnly: true - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_RAW", "NET_ADMIN"] - volumes: - - name: cni-bin - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate - - name: cni-cfg - hostPath: - path: /etc/cni/net.d - type: DirectoryOrCreate - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: lib-modules - hostPath: - path: /lib/modules -kind: ConfigMap -metadata: - name: cni-${CLUSTER_NAME}-crs-0 \ No newline at end of file diff --git a/test/e2e/flags.go b/test/e2e/flags.go index 6322f006..f3c3d4a2 100644 --- a/test/e2e/flags.go +++ b/test/e2e/flags.go @@ -49,6 +49,9 @@ type FlagValues struct { // ClusterctlBinaryPath is the path to the clusterctl binary to use. 
ClusterctlBinaryPath string + + // CNI for the cluster to use. Values include calico or kindnet. + CNI string } // InitFlags is used to specify the standard flags for the e2e tests. @@ -62,4 +65,5 @@ func InitFlags(values *FlagValues) { flag.StringVar(&values.ClusterctlBinaryPath, "e2e.clusterctl-binary-path", "helm", "path to the clusterctl binary") flag.StringVar(&values.ChartPath, "e2e.chart-path", "", "path to the operator chart") flag.BoolVar(&values.IsolatedMode, "e2e.isolated-mode", false, "if true, the test will run without ngrok and exposing the cluster to the internet. This setup will only work with CAPD or other providers that run in the same network as the bootstrap cluster.") + flag.StringVar(&values.CNI, "e2e.cni", "calico", "specify CNI solution for the cluster. Allowed values: calico, kindnet") } diff --git a/test/e2e/specs/import_gitops.go b/test/e2e/specs/import_gitops.go index 54d8fa1d..e1590833 100644 --- a/test/e2e/specs/import_gitops.go +++ b/test/e2e/specs/import_gitops.go @@ -58,6 +58,8 @@ type CreateUsingGitOpsSpecInput struct { ClusterName string AdditionalTemplateVariables map[string]string + CNITemplate []byte + CAPIClusterCreateWaitName string DeleteClusterWaitName string @@ -184,6 +186,16 @@ func CreateUsingGitOpsSpec(ctx context.Context, inputGetter func() CreateUsingGi AddtionalEnvironmentVariables: additionalVars, })).To(Succeed()) + if input.CNITemplate != nil { + cniPath := filepath.Join(clustersDir, fmt.Sprintf("%s-cni.yaml", input.ClusterName)) + Expect(turtlesframework.ApplyFromTemplate(ctx, turtlesframework.ApplyFromTemplateInput{ + Getter: input.E2EConfig.GetVariable, + Template: input.CNITemplate, + OutputFilePath: cniPath, + AddtionalEnvironmentVariables: additionalVars, + })).To(Succeed()) + } + fleetPath := filepath.Join(clustersDir, "fleet.yaml") turtlesframework.FleetCreateFleetFile(ctx, turtlesframework.FleetCreateFleetFileInput{ Namespace: namespace.Name, diff --git a/test/e2e/specs/import_gitops_mgmtv3.go b/test/e2e/specs/import_gitops_mgmtv3.go index e8360bc5..f735a241 100644 --- a/test/e2e/specs/import_gitops_mgmtv3.go +++ b/test/e2e/specs/import_gitops_mgmtv3.go @@ -58,6 +58,8 @@ type CreateMgmtV3UsingGitOpsSpecInput struct { ClusterName string AdditionalTemplateVariables map[string]string + CNITemplate []byte + CAPIClusterCreateWaitName string DeleteClusterWaitName string @@ -189,6 +191,16 @@ func CreateMgmtV3UsingGitOpsSpec(ctx context.Context, inputGetter func() CreateM AddtionalEnvironmentVariables: additionalVars, })).To(Succeed()) + if input.CNITemplate != nil { + cniPath := filepath.Join(clustersDir, fmt.Sprintf("%s-cni.yaml", input.ClusterName)) + Expect(turtlesframework.ApplyFromTemplate(ctx, turtlesframework.ApplyFromTemplateInput{ + Getter: input.E2EConfig.GetVariable, + Template: input.CNITemplate, + OutputFilePath: cniPath, + AddtionalEnvironmentVariables: additionalVars, + })).To(Succeed()) + } + fleetPath := filepath.Join(clustersDir, "fleet.yaml") turtlesframework.FleetCreateFleetFile(ctx, turtlesframework.FleetCreateFleetFileInput{ Namespace: namespace.Name, diff --git a/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go b/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go index b61a2b85..e5aa2510 100644 --- a/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go +++ b/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go @@ -37,6 +37,10 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit }) 
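	// Reviewer note, not part of this change: each Docker/Kubeadm suite below now
	// resolves the embedded CNI template from the new -e2e.cni flag (default
	// "calico", alternative "kindnet"), which the Makefile forwards via
	// `make test-e2e CNI=...`. The selected bytes are passed to the GitOps spec as
	// CNITemplate and rendered by ApplyFromTemplate alongside the cluster
	// template, replacing the kindnet ClusterResourceSet that used to be inlined
	// in docker-kubeadm.yaml; the calico path instead relies on the new
	// HelmChartProxy matched by the cluster's calico: "true" label.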
specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { + cni := e2e.KindnetCNI + if flagVals.CNI == "calico" { + cni = e2e.CalicoCNI + } return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, @@ -45,6 +49,7 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit ArtifactFolder: flagVals.ArtifactFolder, ClusterTemplate: e2e.CAPIDockerKubeadm, ClusterName: "highlander-e2e-cluster1", + CNITemplate: cni, ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), GitAddr: giteaResult.GitAddress, diff --git a/test/e2e/suites/import-gitops/import_gitops_test.go b/test/e2e/suites/import-gitops/import_gitops_test.go index 26bae1cd..b327fd26 100644 --- a/test/e2e/suites/import-gitops/import_gitops_test.go +++ b/test/e2e/suites/import-gitops/import_gitops_test.go @@ -39,6 +39,10 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit }) specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { + cni := e2e.KindnetCNI + if flagVals.CNI == "calico" { + cni = e2e.CalicoCNI + } return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, @@ -47,6 +51,7 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit ArtifactFolder: flagVals.ArtifactFolder, ClusterTemplate: e2e.CAPIDockerKubeadm, ClusterName: "highlander-e2e-cluster1", + CNITemplate: cni, ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), GitAddr: giteaResult.GitAddress, diff --git a/test/e2e/suites/managementv3/managementv3_test.go b/test/e2e/suites/managementv3/managementv3_test.go index 8d4867b1..a6af7920 100644 --- a/test/e2e/suites/managementv3/managementv3_test.go +++ b/test/e2e/suites/managementv3/managementv3_test.go @@ -36,6 +36,10 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and dele }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { + cni := e2e.KindnetCNI + if flagVals.CNI == "calico" { + cni = e2e.CalicoCNI + } return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, @@ -44,6 +48,7 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and dele ArtifactFolder: flagVals.ArtifactFolder, ClusterTemplate: e2e.CAPIDockerKubeadm, ClusterName: "highlander-e2e-clusterv3-1", + CNITemplate: cni, ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), GitAddr: giteaResult.GitAddress, diff --git a/test/testenv/cleanup.go b/test/testenv/cleanup.go index 0218b696..25e2af48 100644 --- a/test/testenv/cleanup.go +++ b/test/testenv/cleanup.go @@ -74,7 +74,7 @@ func CollectArtifacts(ctx context.Context, kubeconfigPath, name string, args ... return fmt.Errorf("Unable to collect artifacts: kubeconfig path is empty") } - aargs := append([]string{"crust-gather", "collect", "--kubeconfig", kubeconfigPath, "-f", name}, args...) + aargs := append([]string{"crust-gather", "collect", "-v", "ERROR", "--kubeconfig", kubeconfigPath, "-f", name}, args...) for _, secret := range secrets { aargs = append(aargs, "-s", secret) } diff --git a/test/testenv/turtles.go b/test/testenv/turtles.go index 7d6ca896..88d00070 100644 --- a/test/testenv/turtles.go +++ b/test/testenv/turtles.go @@ -23,11 +23,14 @@ import ( . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" opframework "sigs.k8s.io/cluster-api-operator/test/framework" "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/controller-runtime/pkg/client" - turtlesframework "github.com/rancher/turtles/test/framework" + turtlesv1 "github.com/rancher-sandbox/rancher-turtles/api/v1alpha1" + turtlesframework "github.com/rancher-sandbox/rancher-turtles/test/framework" ) type DeployRancherTurtlesInput struct { @@ -132,4 +135,23 @@ func DeployRancherTurtles(ctx context.Context, input DeployRancherTurtlesInput) Namespace: "capd-system", }}, }, input.WaitDeploymentsReadyInterval...) + + key := client.ObjectKey{ + Namespace: "capd-system", + Name: "helm", + } + if err := input.BootstrapClusterProxy.GetClient().Get(ctx, key, &turtlesv1.CAPIProvider{}); apierrors.IsNotFound(err) { + return + } + + Expect(err).ToNot(HaveOccurred()) + + By("Waiting for CAPI helm provider deployment to be available") + framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{ + Getter: input.BootstrapClusterProxy.GetClient(), + Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: "caaph-controller-manager", + Namespace: "capd-system", + }}, + }, input.WaitDeploymentsReadyInterval...) }