From 7b617e56ed4ef00a154759152075e54659eaf495 Mon Sep 17 00:00:00 2001 From: Alexandr Demicev Date: Thu, 12 Dec 2024 17:21:20 +0100 Subject: [PATCH] Run each case inside suite in parallel Signed-off-by: Alexandr Demicev --- .github/workflows/run-e2e-suite.yaml | 1 + Makefile | 2 +- .../chart-upgrade/chart_upgrade_test.go | 40 +- test/e2e/suites/chart-upgrade/suite_test.go | 217 +++++--- .../embedded_capi_disabled_v3_test.go | 6 +- .../embedded-capi-disabled-v3/suite_test.go | 395 ++++++++------ .../embedded_capi_disabled_test.go | 6 +- .../embedded-capi-disabled/suite_test.go | 404 +++++++------- .../etcd_snapshot_restore_test.go | 6 +- .../etcd-snapshot-restore/suite_test.go | 335 +++++++----- .../import-gitops-v3/import_gitops_v3_test.go | 30 +- .../e2e/suites/import-gitops-v3/suite_test.go | 406 +++++++------- .../import-gitops/import_gitops_test.go | 24 +- test/e2e/suites/import-gitops/suite_test.go | 500 ++++++++++-------- .../migrate_gitops_provv1_mgmtv3_test.go | 6 +- test/e2e/suites/migrate-gitops/suite_test.go | 388 ++++++++------ test/e2e/suites/update-labels/suite_test.go | 269 ++++++---- .../update-labels/update_labels_test.go | 27 +- test/e2e/suites/v2prov/suite_test.go | 267 ++++++---- test/e2e/suites/v2prov/v2prov_test.go | 26 +- test/testenv/setupcluster.go | 13 +- 21 files changed, 1893 insertions(+), 1475 deletions(-) diff --git a/.github/workflows/run-e2e-suite.yaml b/.github/workflows/run-e2e-suite.yaml index 27f80a0f..d4ea10de 100644 --- a/.github/workflows/run-e2e-suite.yaml +++ b/.github/workflows/run-e2e-suite.yaml @@ -53,6 +53,7 @@ env: MANAGEMENT_CLUSTER_ENVIRONMENT: ${{ inputs.MANAGEMENT_CLUSTER_ENVIRONMENT }} GINKGO_LABEL_FILTER: full GINKGO_TESTS: ${{ github.workspace }}/${{ inputs.test_suite }} + GINKGO_NODES: 5 jobs: run_e2e_tests: diff --git a/Makefile b/Makefile index 39935902..63477221 100644 --- a/Makefile +++ b/Makefile @@ -577,7 +577,7 @@ release-chart: $(HELM) $(NOTES) build-chart verify-gen $(CACHE_DIR): mkdir -p $(CACHE_DIR)/ 
-E2E_RUN_COMMAND=$(E2ECONFIG_VARS) $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \ +E2E_RUN_COMMAND=$(E2ECONFIG_VARS) $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \ -poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" --label-filter="$(GINKGO_LABEL_FILTER)" \ $(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) \ --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) $(GINKGO_TESTS) -- \ diff --git a/test/e2e/suites/chart-upgrade/chart_upgrade_test.go b/test/e2e/suites/chart-upgrade/chart_upgrade_test.go index 49121251..8c03e679 100644 --- a/test/e2e/suites/chart-upgrade/chart_upgrade_test.go +++ b/test/e2e/suites/chart-upgrade/chart_upgrade_test.go @@ -38,20 +38,20 @@ import ( var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) It("Should perform upgrade from GA version to latest", func() { rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), TurtlesChartPath: "https://rancher.github.io/turtles", CAPIProvidersYAML: e2e.CapiProvidersLegacy, Namespace: framework.DefaultRancherTurtlesNamespace, Version: "v0.6.0", - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: map[string]string{}, } testenv.DeployRancherTurtles(ctx, rtInput) @@ -60,8 +60,8 @@ var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestL
HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), ChartsPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), ChartVersion: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + BootstrapClusterProxy: bootstrapClusterProxy, + WaitInterval: e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers"), Variables: e2eConfig.Variables, } @@ -70,12 +70,12 @@ var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestL testenv.DeployChartMuseum(ctx, chartMuseumDeployInput) upgradeInput := testenv.UpgradeRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), Namespace: framework.DefaultRancherTurtlesNamespace, Image: "ghcr.io/rancher/turtles-e2e", Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers"), AdditionalValues: rtInput.AdditionalValues, PostUpgradeSteps: []func(){}, } @@ -88,20 +88,20 @@ var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestL upgradeInput.PostUpgradeSteps = append(upgradeInput.PostUpgradeSteps, func() { By("Waiting for CAAPF deployment to be available") capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "caapf-controller-manager", Namespace: e2e.RancherTurtlesNamespace, }}, - }, 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) + }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) By("Setting the CAAPF config to use hostNetwork") - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) + Expect(bootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) }) upgradeInput.PostUpgradeSteps = append(upgradeInput.PostUpgradeSteps, func() { framework.WaitForCAPIProviderRollout(ctx, framework.WaitForCAPIProviderRolloutInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), Version: e2e.CAPIVersion, Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "capi-controller-manager", @@ -110,17 +110,17 @@ var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestL Image: "registry.suse.com/rancher/cluster-api-controller:", Name: "cluster-api", Namespace: "capi-system", - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) + }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) }, func() { framework.WaitForCAPIProviderRollout(ctx, framework.WaitForCAPIProviderRolloutInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), Version: e2e.CAPIVersion, Name: "kubeadm-control-plane", Namespace: "capi-kubeadm-control-plane-system", - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) + }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) 
}, func() { framework.WaitForCAPIProviderRollout(ctx, framework.WaitForCAPIProviderRolloutInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "rke2-bootstrap-controller-manager", Namespace: "rke2-bootstrap-system", @@ -128,10 +128,10 @@ var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestL Image: "registry.suse.com/rancher/cluster-api-provider-rke2-bootstrap:", Name: "rke2-bootstrap", Namespace: "rke2-bootstrap-system", - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) + }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) }, func() { framework.WaitForCAPIProviderRollout(ctx, framework.WaitForCAPIProviderRolloutInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ Name: "rke2-control-plane-controller-manager", Namespace: "rke2-control-plane-system", @@ -139,14 +139,14 @@ var _ = Describe("Chart upgrade functionality should work", Label(e2e.ShortTestL Image: "registry.suse.com/rancher/cluster-api-provider-rke2-controlplane:", Name: "rke2-control-plane", Namespace: "rke2-control-plane-system", - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) + }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) }, func() { framework.WaitForCAPIProviderRollout(ctx, framework.WaitForCAPIProviderRolloutInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), Version: e2e.CAPIVersion, Name: "docker", Namespace: "capd-system", - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) 
+ }, e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) }) testenv.UpgradeRancherTurtles(ctx, upgradeInput) diff --git a/test/e2e/suites/chart-upgrade/suite_test.go b/test/e2e/suites/chart-upgrade/suite_test.go index 7bdf3406..7a8c8836 100644 --- a/test/e2e/suites/chart-upgrade/suite_test.go +++ b/test/e2e/suites/chart-upgrade/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package chart_upgrade import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -33,6 +37,7 @@ import ( "github.com/rancher/turtles/test/testenv" "k8s.io/klog/v2" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -58,7 +63,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -74,102 +81,140 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-chart-upgrade") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: 
preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), - RancherHost: hostName, - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - 
&testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) -}) + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) -var _ = AfterSuite(func() { - testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - Namespace: 
framework.DefaultRancherTurtlesNamespace, - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), + RancherHost: hostName, + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + // encode the e2e config into the byte array. 
+ var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(4)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + + configBytes, err := base64.StdEncoding.DecodeString(parts[2]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[3] }) - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + Namespace: framework.DefaultRancherTurtlesNamespace, + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), + }) - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + skipCleanup, err := 
strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) func shortTestOnly() bool { return GinkgoLabelFilter() == e2e.ShortTestLabel diff --git a/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go b/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go index 57f56e91..205bb33c 100644 --- a/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go +++ b/test/e2e/suites/embedded-capi-disabled-v3/embedded_capi_disabled_v3_test.go @@ -31,14 +31,14 @@ import ( var _ = Describe("[AWS] [EKS] [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import (embedded capi disabled from start)", Label(e2e.FullTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -46,7 +46,7 @@ var _ = Describe("[AWS] [EKS] [management.cattle.io/v3] Create and delete CAPI c ClusterName: "highlander-e2e-cluster1", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, diff --git 
a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go index 6f88b7d7..92f95525 100644 --- a/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled-v3/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package embedded_capi_disabled_v3 import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -32,6 +36,7 @@ import ( "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" "k8s.io/klog/v2" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -57,8 +62,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult - giteaResult *testenv.DeployGiteaResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -74,180 +80,221 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-embedded-capi-disabled-v3") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. - // and the deploy Rancher Turtles. - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherFeatures: "embedded-cluster-api=false", - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{ - "cluster-api-operator.cert-manager.enabled": "false", - "rancherTurtles.features.embedded-capi.disabled": "false", - }, - } - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - 
- testenv.DeployRancherTurtles(ctx, rtInput) - - // NOTE: there are no short or local tests in this suite - By("Deploying additional infrastructure providers") - awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) - gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) - Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") - Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") - - testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - CAPIProvidersSecretsYAML: [][]byte{ - e2e.AWSProviderSecret, - e2e.AzureIdentitySecret, - e2e.GCPProviderSecret, - }, - CAPIProvidersYAML: e2e.FullProviders, - TemplateData: map[string]string{ - "AWSEncodedCredentials": awsCreds, - "GCPEncodedCredentials": gcpCreds, - }, - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - WaitForDeployments: []testenv.NamespaceName{ - { - Name: "capa-controller-manager", - Namespace: "capa-system", + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: 
e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. + // and the deploy Rancher Turtles. + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherFeatures: "embedded-cluster-api=false", + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: 
e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{ + "cluster-api-operator.cert-manager.enabled": "false", + "rancherTurtles.features.embedded-capi.disabled": "false", + }, + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + // NOTE: there are no short or local tests in this suite + By("Deploying additional infrastructure providers") + awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) + gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) + Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") + Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.AWSProviderSecret, + e2e.AzureIdentitySecret, + e2e.GCPProviderSecret, + }, + CAPIProvidersYAML: e2e.FullProviders, + TemplateData: map[string]string{ + "AWSEncodedCredentials": awsCreds, + "GCPEncodedCredentials": gcpCreds, }, - { - Name: "capz-controller-manager", - Namespace: "capz-system", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capa-controller-manager", + Namespace: "capa-system", + }, + { + Name: "capz-controller-manager", + Namespace: "capz-system", + }, + { + Name: "capg-controller-manager", + Namespace: "capg-system", + }, }, - { - Name: "capg-controller-manager", - Namespace: "capg-system", + }) + + giteaInput 
:= testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), }, - }, - }) - - giteaInput := testenv.DeployGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), - ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), - ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), - ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), - ValuesFilePath: "../../data/gitea/values.yaml", - Values: map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - }, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - } - - testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) - - giteaResult = testenv.DeployGitea(ctx, giteaInput) -}) - -var _ = AfterSuite(func() { - testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ - BootstrapClusterProxy: 
setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), - }) - - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult := testenv.DeployGitea(ctx, giteaInput) + + // encode the e2e config into the byte array. 
+ var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + giteaResult.GitAddress, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(5)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + gitAddress = parts[2] + + configBytes, err := base64.StdEncoding.DecodeString(parts[3]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[4] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), + }) + + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) diff --git 
a/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go b/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go index e488e965..f5d2a96b 100644 --- a/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go +++ b/test/e2e/suites/embedded-capi-disabled/embedded_capi_disabled_test.go @@ -31,14 +31,14 @@ import ( var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality should work with namespace auto-import (embedded capi disable from start)", Label(e2e.FullTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -46,7 +46,7 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul ClusterName: "highlander-e2e-cluster1", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, diff --git a/test/e2e/suites/embedded-capi-disabled/suite_test.go b/test/e2e/suites/embedded-capi-disabled/suite_test.go index 397bcf7e..8daaba51 100644 --- a/test/e2e/suites/embedded-capi-disabled/suite_test.go +++ b/test/e2e/suites/embedded-capi-disabled/suite_test.go @@ -20,21 +20,25 @@ limitations under the License. package embedded_capi_disabled import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "k8s.io/klog/v2" - "sigs.k8s.io/cluster-api/test/framework/clusterctl" - ctrl "sigs.k8s.io/controller-runtime" - "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" + "k8s.io/klog/v2" + capiframework "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + ctrl "sigs.k8s.io/controller-runtime" ) // Test suite flags. @@ -58,8 +62,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult - giteaResult *testenv.DeployGiteaResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -75,181 +80,222 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-embedded-capi-disabled") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. - // and the deploy Rancher Turtles. - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherFeatures: "embedded-cluster-api=false", - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{ - "cluster-api-operator.cert-manager.enabled": "false", - "rancherTurtles.features.embedded-capi.disabled": "false", - "rancherTurtles.features.managementv3-cluster.enabled": "false", - }, - 
} - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - // NOTE: there are no short or local tests in this suite - By("Deploying additional infrastructure providers") - awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) - gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) - Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") - Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") - - testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - CAPIProvidersSecretsYAML: [][]byte{ - e2e.AWSProviderSecret, - e2e.AzureIdentitySecret, - e2e.GCPProviderSecret, - }, - CAPIProvidersYAML: e2e.FullProviders, - TemplateData: map[string]string{ - "AWSEncodedCredentials": awsCreds, - "GCPEncodedCredentials": gcpCreds, - }, - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - WaitForDeployments: []testenv.NamespaceName{ - { - Name: "capa-controller-manager", - Namespace: "capa-system", + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: 
e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + // NOTE: deploy Rancher first with the embedded-cluster-api feature disabled. + // and the deploy Rancher Turtles. + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherFeatures: "embedded-cluster-api=false", + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: 
e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{ + "cluster-api-operator.cert-manager.enabled": "false", + "rancherTurtles.features.embedded-capi.disabled": "false", + "rancherTurtles.features.managementv3-cluster.enabled": "false", }, - { - Name: "capz-controller-manager", - Namespace: "capz-system", + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + // NOTE: there are no short or local tests in this suite + By("Deploying additional infrastructure providers") + awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) + gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) + Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") + Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.AWSProviderSecret, + e2e.AzureIdentitySecret, + e2e.GCPProviderSecret, + }, + CAPIProvidersYAML: e2e.FullProviders, + TemplateData: map[string]string{ + "AWSEncodedCredentials": awsCreds, + "GCPEncodedCredentials": gcpCreds, }, - { - Name: "capg-controller-manager", - Namespace: "capg-system", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capa-controller-manager", + Namespace: "capa-system", + }, + { + Name: 
"capz-controller-manager", + Namespace: "capz-system", + }, + { + Name: "capg-controller-manager", + Namespace: "capg-system", + }, + }, + }) + + giteaInput := testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), }, - }, - }) - - giteaInput := testenv.DeployGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), - ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), - ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), - ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), - ValuesFilePath: "../../data/gitea/values.yaml", - Values: map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - }, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - } - - testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) - - giteaResult = 
testenv.DeployGitea(ctx, giteaInput) -}) - -var _ = AfterSuite(func() { - testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), - }) - - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult := testenv.DeployGitea(ctx, giteaInput) + + // encode the e2e config into the byte array. 
+ var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + giteaResult.GitAddress, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(5)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + gitAddress = parts[2] + + configBytes, err := base64.StdEncoding.DecodeString(parts[3]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[4] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), + }) + + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) diff --git 
a/test/e2e/suites/etcd-snapshot-restore/etcd_snapshot_restore_test.go b/test/e2e/suites/etcd-snapshot-restore/etcd_snapshot_restore_test.go index 7aaa9d68..1cccb01f 100644 --- a/test/e2e/suites/etcd-snapshot-restore/etcd_snapshot_restore_test.go +++ b/test/e2e/suites/etcd-snapshot-restore/etcd_snapshot_restore_test.go @@ -32,14 +32,14 @@ import ( var _ = Describe("[Docker] [RKE2] Perform an ETCD backup and restore of the cluster", Label(e2e.LocalTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) specs.ETCDSnapshotRestore(ctx, func() specs.ETCDSnapshotRestoreInput { return specs.ETCDSnapshotRestoreInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -47,7 +47,7 @@ var _ = Describe("[Docker] [RKE2] Perform an ETCD backup and restore of the clus ClusterName: "etcd-snapshot-restore", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](0), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, diff --git a/test/e2e/suites/etcd-snapshot-restore/suite_test.go b/test/e2e/suites/etcd-snapshot-restore/suite_test.go index f1b88fe1..392183b8 100644 --- a/test/e2e/suites/etcd-snapshot-restore/suite_test.go +++ b/test/e2e/suites/etcd-snapshot-restore/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package etcd_snapshot_restore import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . 
"github.com/onsi/ginkgo/v2" @@ -32,6 +36,7 @@ import ( "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" "k8s.io/klog/v2" + capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" ) @@ -57,8 +62,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult - giteaResult *testenv.DeployGiteaResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -74,150 +80,191 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-import-gitops") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: 
preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - 
RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, + }) + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + 
HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{ + "rancherTurtles.features.etcd-snapshot-restore.enabled": "true", // enable etcd-snapshot-restore feature + }, + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + giteaInput := testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), 
"wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult := testenv.DeployGitea(ctx, giteaInput) + + // encode the e2e config into the byte array. + var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + giteaResult.GitAddress, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(5)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + gitAddress = parts[2] + + configBytes, err := base64.StdEncoding.DecodeString(parts[3]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[4] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + DeleteWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), + }) + + testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + Namespace: framework.DefaultRancherTurtlesNamespace, + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{ - "rancherTurtles.features.etcd-snapshot-restore.enabled": "true", // enable etcd-snapshot-restore feature - }, - } - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - giteaInput := testenv.DeployGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), - ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), - ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), - ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), - ValuesFilePath: "../../data/gitea/values.yaml", - Values: map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - 
"gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - }, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - } - - testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) - - giteaResult = testenv.DeployGitea(ctx, giteaInput) -}) - -var _ = AfterSuite(func() { - testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), - }) - - testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - Namespace: framework.DefaultRancherTurtlesNamespace, - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), - }) - - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, 
testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) func shortTestOnly() bool { return GinkgoLabelFilter() == e2e.ShortTestLabel diff --git a/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go b/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go index b93fcde0..e8dc5da5 100644 --- a/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go +++ b/test/e2e/suites/import-gitops-v3/import_gitops_v3_test.go @@ -31,14 +31,14 @@ import ( var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel), func() { BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -46,7 +46,7 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and dele ClusterName: "clusterv3-auto-import-kubeadm", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, @@ -64,14 +64,14 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Create and dele var _ = Describe("[Docker] [RKE2] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.LocalTestLabel), func() { 
BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -79,7 +79,7 @@ var _ = Describe("[Docker] [RKE2] - [management.cattle.io/v3] Create and delete ClusterName: "clusterv3-auto-import-rke2", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, @@ -97,21 +97,21 @@ var _ = Describe("[Docker] [RKE2] - [management.cattle.io/v3] Create and delete var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Create and delete CAPI cluster from cluster class", Label(e2e.FullTestLabel), func() { BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ArtifactFolder: artifactsFolder, ClusterTemplate: e2e.CAPIAzureAKSTopology, ClusterName: "highlander-e2e-topology", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, 
SkipDeletionTest: false, @@ -128,14 +128,14 @@ var _ = Describe("[Azure] [AKS] - [management.cattle.io/v3] Create and delete CA var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -143,7 +143,7 @@ var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI ClusterName: "clusterv3-eks", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, @@ -160,14 +160,14 @@ var _ = Describe("[AWS] [EKS] - [management.cattle.io/v3] Create and delete CAPI var _ = Describe("[GCP] [GKE] - [management.cattle.io/v3] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) specs.CreateMgmtV3UsingGitOpsSpec(ctx, func() specs.CreateMgmtV3UsingGitOpsSpecInput { return specs.CreateMgmtV3UsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: 
flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -175,7 +175,7 @@ var _ = Describe("[GCP] [GKE] - [management.cattle.io/v3] Create and delete CAPI ClusterName: "clusterv3-gke", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, diff --git a/test/e2e/suites/import-gitops-v3/suite_test.go b/test/e2e/suites/import-gitops-v3/suite_test.go index 9919e76e..378c58c6 100644 --- a/test/e2e/suites/import-gitops-v3/suite_test.go +++ b/test/e2e/suites/import-gitops-v3/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package import_gitops_v3 import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -61,8 +65,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult - giteaResult *testenv.DeployGiteaResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -78,192 +83,233 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-managementv3") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{ - "rancherTurtles.features.addon-provider-fleet.enabled": "true", - }, - } - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - 
By("Waiting for CAAPF deployment to be available") - capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), - Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ - Name: "caapf-controller-manager", - Namespace: e2e.RancherTurtlesNamespace, - }}, - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) - - By("Setting the CAAPF config to use hostNetwork") - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) - - if !shortTestOnly() && !localTestOnly() { - By("Running full tests, deploying additional infrastructure providers") - awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) - gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) - Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") - Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") - - testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - CAPIProvidersSecretsYAML: [][]byte{ - e2e.AWSProviderSecret, - e2e.AzureIdentitySecret, - e2e.GCPProviderSecret, - }, - CAPIProvidersYAML: e2e.FullProviders, - TemplateData: map[string]string{ - "AWSEncodedCredentials": awsCreds, - "GCPEncodedCredentials": gcpCreds, - }, + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + rtInput := 
testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - WaitForDeployments: []testenv.NamespaceName{ - { - Name: "capa-controller-manager", - Namespace: "capa-system", + AdditionalValues: map[string]string{ + "rancherTurtles.features.addon-provider-fleet.enabled": "true", + }, + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + By("Waiting for CAAPF deployment to be available") + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ + Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: "caapf-controller-manager", + Namespace: e2e.RancherTurtlesNamespace, + }}, + }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) 
+ + By("Setting the CAAPF config to use hostNetwork") + Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) + + if !shortTestOnly() && !localTestOnly() { + By("Running full tests, deploying additional infrastructure providers") + awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) + gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) + Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") + Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.AWSProviderSecret, + e2e.AzureIdentitySecret, + e2e.GCPProviderSecret, }, - { - Name: "capz-controller-manager", - Namespace: "capz-system", + CAPIProvidersYAML: e2e.FullProviders, + TemplateData: map[string]string{ + "AWSEncodedCredentials": awsCreds, + "GCPEncodedCredentials": gcpCreds, }, - { - Name: "capg-controller-manager", - Namespace: "capg-system", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capa-controller-manager", + Namespace: "capa-system", + }, + { + Name: "capz-controller-manager", + Namespace: "capz-system", + }, + { + Name: "capg-controller-manager", + Namespace: "capg-system", + }, }, + }) + } + + giteaInput := testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + 
Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult := testenv.DeployGitea(ctx, giteaInput) + + // encode the e2e config into the byte array. + var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + giteaResult.GitAddress, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(5)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + gitAddress = parts[2] + + configBytes, err := base64.StdEncoding.DecodeString(parts[3]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[4] + }, +) + +var _ = 
SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), }) - } - - giteaInput := testenv.DeployGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), - ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), - ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), - ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), - ValuesFilePath: "../../data/gitea/values.yaml", - Values: map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - }, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - } - - testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) - - giteaResult = testenv.DeployGitea(ctx, giteaInput) -}) - -var _ = AfterSuite(func() { - testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), - }) - - skipCleanup, err := 
strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) func shortTestOnly() bool { return GinkgoLabelFilter() == e2e.ShortTestLabel diff --git a/test/e2e/suites/import-gitops/import_gitops_test.go b/test/e2e/suites/import-gitops/import_gitops_test.go index 22e19af2..93b0ceeb 100644 --- a/test/e2e/suites/import-gitops/import_gitops_test.go +++ b/test/e2e/suites/import-gitops/import_gitops_test.go @@ -34,14 +34,14 @@ import ( var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.ShortTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -49,7 +49,7 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit ClusterName: "clusterv1-docker-kubeadm", ControlPlaneMachineCount: ptr.To[int](1), 
WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, @@ -64,14 +64,14 @@ var _ = Describe("[Docker] [Kubeadm] Create and delete CAPI cluster functionalit var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.FullTestLabel), func() { BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -79,7 +79,7 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul ClusterName: "clusterv1-eks", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, @@ -93,14 +93,14 @@ var _ = Describe("[AWS] [EKS] Create and delete CAPI cluster functionality shoul var _ = Describe("[vSphere] [Kubeadm] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.LocalTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: 
bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -108,7 +108,7 @@ var _ = Describe("[vSphere] [Kubeadm] Create and delete CAPI cluster functionali ClusterName: "cluster-vsphere-kubeadm", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, @@ -126,14 +126,14 @@ var _ = Describe("[vSphere] [Kubeadm] Create and delete CAPI cluster functionali var _ = Describe("[vSphere] [RKE2] Create and delete CAPI cluster functionality should work with namespace auto-import", Label(e2e.LocalTestLabel), func() { BeforeEach(func() { - SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + SetClient(bootstrapClusterProxy.GetClient()) SetContext(ctx) }) specs.CreateUsingGitOpsSpec(ctx, func() specs.CreateUsingGitOpsSpecInput { return specs.CreateUsingGitOpsSpecInput{ E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -141,7 +141,7 @@ var _ = Describe("[vSphere] [RKE2] Create and delete CAPI cluster functionality ClusterName: "cluster-vsphere-rke2", ControlPlaneMachineCount: ptr.To[int](1), WorkerMachineCount: ptr.To[int](1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, diff --git a/test/e2e/suites/import-gitops/suite_test.go b/test/e2e/suites/import-gitops/suite_test.go index 9be53f5f..ff6915a8 100644 --- a/test/e2e/suites/import-gitops/suite_test.go +++ b/test/e2e/suites/import-gitops/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. 
package import_gitops import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -60,8 +64,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult - giteaResult *testenv.DeployGiteaResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -77,249 +82,290 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-import-gitops") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: 
setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, - }) +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) - hostName = rancherHookResult.HostName + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - testenv.DeployRancher(ctx, rancherInput) + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - if shortTestOnly() { - chartMuseumDeployInput := testenv.DeployChartMuseumInput{ - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartsPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - ChartVersion: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - testenv.PreChartMuseumInstallHook(&chartMuseumDeployInput, e2eConfig) + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - testenv.DeployChartMuseum(ctx, chartMuseumDeployInput) + setupClusterResult = testenv.SetupTestCluster(ctx, 
testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, + }) - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{}, - } + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) - 
rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" - rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - By("Waiting for CAAPF deployment to be available") - capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), - Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ - Name: "caapf-controller-manager", - Namespace: e2e.RancherTurtlesNamespace, - }}, - }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) - - By("Setting the CAAPF config to use hostNetwork") - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) - } else { - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{}, + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + 
CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, } - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller - testenv.DeployRancherTurtles(ctx, rtInput) - } - - if !shortTestOnly() && !localTestOnly() { - By("Running full tests, deploying additional infrastructure providers") - awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) - gcpCreds := e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) - Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") - Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + if shortTestOnly() { + chartMuseumDeployInput := testenv.DeployChartMuseumInput{ + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartsPath: 
e2eConfig.GetVariable(e2e.TurtlesPathVar), + ChartVersion: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + testenv.PreChartMuseumInstallHook(&chartMuseumDeployInput, e2eConfig) + + testenv.DeployChartMuseum(ctx, chartMuseumDeployInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{}, + } + + rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" + rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + By("Waiting for CAAPF deployment to be available") + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ + Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: "caapf-controller-manager", + Namespace: e2e.RancherTurtlesNamespace, + }}, + }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) 
+ + By("Setting the CAAPF config to use hostNetwork") + Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) + } else { + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{}, + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller + testenv.DeployRancherTurtles(ctx, rtInput) + } - testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - CAPIProvidersSecretsYAML: [][]byte{ - e2e.AWSProviderSecret, - e2e.AzureIdentitySecret, - e2e.GCPProviderSecret, - }, - CAPIProvidersYAML: e2e.FullProviders, - TemplateData: map[string]string{ - "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), - "GCPEncodedCredentials": gcpCreds, - }, - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - WaitForDeployments: []testenv.NamespaceName{ - { - Name: "capa-controller-manager", - Namespace: "capa-system", + if !shortTestOnly() && !localTestOnly() { + By("Running full tests, deploying additional infrastructure providers") + awsCreds := e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar) + gcpCreds := 
e2eConfig.GetVariable(e2e.CapgEncodedCredentialsVar) + Expect(awsCreds).ToNot(BeEmpty(), "AWS creds required for full test") + Expect(gcpCreds).ToNot(BeEmpty(), "GCP creds required for full test") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.AWSProviderSecret, + e2e.AzureIdentitySecret, + e2e.GCPProviderSecret, + }, + CAPIProvidersYAML: e2e.FullProviders, + TemplateData: map[string]string{ + "AWSEncodedCredentials": e2eConfig.GetVariable(e2e.CapaEncodedCredentialsVar), + "GCPEncodedCredentials": gcpCreds, }, - { - Name: "capz-controller-manager", - Namespace: "capz-system", + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capa-controller-manager", + Namespace: "capa-system", + }, + { + Name: "capz-controller-manager", + Namespace: "capz-system", + }, + { + Name: "capg-controller-manager", + Namespace: "capg-system", + }, }, - { - Name: "capg-controller-manager", - Namespace: "capg-system", + }) + } else if Label(e2e.LocalTestLabel).MatchesLabelFilter(GinkgoLabelFilter()) { + By("Running local vSphere tests, deploying vSphere infrastructure provider") + + testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + CAPIProvidersSecretsYAML: [][]byte{ + e2e.VSphereProviderSecret, }, + CAPIProvidersYAML: e2e.CapvProvider, + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + WaitForDeployments: []testenv.NamespaceName{ + { + Name: "capv-controller-manager", + Namespace: "capv-system", + }, + }, + }) + } + + giteaInput := testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + 
HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult := testenv.DeployGitea(ctx, giteaInput) + + // encode the e2e config into the byte array. 
+ var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + giteaResult.GitAddress, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(5)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + gitAddress = parts[2] + + configBytes, err := base64.StdEncoding.DecodeString(parts[3]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[4] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), }) - } else if Label(e2e.LocalTestLabel).MatchesLabelFilter(GinkgoLabelFilter()) { - By("Running local vSphere tests, deploying vSphere infrastructure provider") - testenv.CAPIOperatorDeployProvider(ctx, testenv.CAPIOperatorDeployProviderInput{ + testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - CAPIProvidersSecretsYAML: [][]byte{ - e2e.VSphereProviderSecret, - }, 
- CAPIProvidersYAML: e2e.CapvProvider, - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - WaitForDeployments: []testenv.NamespaceName{ - { - Name: "capv-controller-manager", - Namespace: "capv-system", - }, - }, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + Namespace: framework.DefaultRancherTurtlesNamespace, + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) - } - - giteaInput := testenv.DeployGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), - ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), - ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), - ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), - ValuesFilePath: "../../data/gitea/values.yaml", - Values: map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - }, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - } - - testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) - - giteaResult = testenv.DeployGitea(ctx, giteaInput) -}) - -var _ = AfterSuite(func() { - testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - 
DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), - }) - - testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - Namespace: framework.DefaultRancherTurtlesNamespace, - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), - }) - - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) func shortTestOnly() bool { return GinkgoLabelFilter() == e2e.ShortTestLabel diff --git a/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go b/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go index a8929575..101105de 100644 --- a/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go +++ b/test/e2e/suites/migrate-gitops/migrate_gitops_provv1_mgmtv3_test.go @@ -33,7 +33,7 @@ import ( var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Migrate v1 to management v3 cluster functionality should work", Label(e2e.ShortTestLabel), func() { BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + 
komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) }) @@ -42,7 +42,7 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Migrate v1 to m HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), ChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), E2EConfig: e2eConfig, - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + BootstrapClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: flagVals.ConfigPath, ClusterctlBinaryPath: e2eConfig.GetVariable(e2e.ClusterctlBinaryPathVar), ArtifactFolder: artifactsFolder, @@ -50,7 +50,7 @@ var _ = Describe("[Docker] [Kubeadm] - [management.cattle.io/v3] Migrate v1 to m ClusterName: "clusterv3-migrated", ControlPlaneMachineCount: ptr.To(1), WorkerMachineCount: ptr.To(1), - GitAddr: giteaResult.GitAddress, + GitAddr: gitAddress, GitAuthSecretName: e2e.AuthSecretName, SkipCleanup: false, SkipDeletionTest: false, diff --git a/test/e2e/suites/migrate-gitops/suite_test.go b/test/e2e/suites/migrate-gitops/suite_test.go index 774da87b..e3b92c29 100644 --- a/test/e2e/suites/migrate-gitops/suite_test.go +++ b/test/e2e/suites/migrate-gitops/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package migrate_gitops import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -61,8 +65,9 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult - giteaResult *testenv.DeployGiteaResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy + gitAddress string ) func init() { @@ -78,177 +83,218 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-migrate-gitops") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), - RancherHost: hostName, - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, + }) + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, + }) + + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherAlphaRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherAlphaUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherAlphaPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherAlphaVersionVar), + RancherHost: hostName, + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + chartMuseumDeployInput := testenv.DeployChartMuseumInput{ + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartsPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + ChartVersion: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + WaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + testenv.PreChartMuseumInstallHook(&chartMuseumDeployInput, e2eConfig) + + testenv.DeployChartMuseum(ctx, chartMuseumDeployInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{}, + } + + rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" + rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + By("Waiting for CAAPF deployment to be available") + capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ + Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ + Name: "caapf-controller-manager", + Namespace: e2e.RancherTurtlesNamespace, + }}, + }, e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) 
+ + By("Setting the CAAPF config to use hostNetwork") + Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) + + giteaInput := testenv.DeployGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), + ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), + ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), + ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), + ValuesFilePath: "../../data/gitea/values.yaml", + Values: map[string]string{ + "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), + "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + }, + RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), + ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), + AuthSecretName: e2e.AuthSecretName, + Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), + Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), + CustomIngressConfig: e2e.GiteaIngress, + Variables: e2eConfig.Variables, + } + + testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) + + giteaResult := testenv.DeployGitea(ctx, giteaInput) + + // encode the e2e config into the byte array. 
+ var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + giteaResult.GitAddress, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(5)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + gitAddress = parts[2] + + configBytes, err := base64.StdEncoding.DecodeString(parts[3]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[4] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), + }) + + testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + Namespace: framework.DefaultRancherTurtlesNamespace, + DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), }) - hostName = rancherHookResult.HostName - - 
testenv.DeployRancher(ctx, rancherInput) - - chartMuseumDeployInput := testenv.DeployChartMuseumInput{ - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartsPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - ChartVersion: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - WaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - testenv.PreChartMuseumInstallHook(&chartMuseumDeployInput, e2eConfig) - - testenv.DeployChartMuseum(ctx, chartMuseumDeployInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{}, - } - - rtInput.AdditionalValues["rancherTurtles.features.addon-provider-fleet.enabled"] = "true" - rtInput.AdditionalValues["rancherTurtles.features.managementv3-cluster.enabled"] = "false" // disable the default management.cattle.io/v3 controller - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - By("Waiting for CAAPF deployment to be available") - capiframework.WaitForDeploymentsAvailable(ctx, capiframework.WaitForDeploymentsAvailableInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), - Deployment: &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{ - Name: "caapf-controller-manager", - Namespace: e2e.RancherTurtlesNamespace, - }}, - }, 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers")...) - - By("Setting the CAAPF config to use hostNetwork") - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, e2e.AddonProviderFleetHostNetworkPatch)).To(Succeed()) - - giteaInput := testenv.DeployGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - ChartRepoName: e2eConfig.GetVariable(e2e.GiteaRepoNameVar), - ChartRepoURL: e2eConfig.GetVariable(e2e.GiteaRepoURLVar), - ChartName: e2eConfig.GetVariable(e2e.GiteaChartNameVar), - ChartVersion: e2eConfig.GetVariable(e2e.GiteaChartVersionVar), - ValuesFilePath: "../../data/gitea/values.yaml", - Values: map[string]string{ - "gitea.admin.username": e2eConfig.GetVariable(e2e.GiteaUserNameVar), - "gitea.admin.password": e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - }, - RolloutWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea"), - ServiceWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-service"), - AuthSecretName: e2e.AuthSecretName, - Username: e2eConfig.GetVariable(e2e.GiteaUserNameVar), - Password: e2eConfig.GetVariable(e2e.GiteaUserPasswordVar), - CustomIngressConfig: e2e.GiteaIngress, - Variables: e2eConfig.Variables, - } - - testenv.PreGiteaInstallHook(&giteaInput, e2eConfig) - - giteaResult = testenv.DeployGitea(ctx, giteaInput) -}) - -var _ = AfterSuite(func() { - testenv.UninstallGitea(ctx, testenv.UninstallGiteaInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-gitea-uninstall"), - }) - - testenv.UninstallRancherTurtles(ctx, testenv.UninstallRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - 
HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - Namespace: framework.DefaultRancherTurtlesNamespace, - DeleteWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-turtles-uninstall"), - }) - - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) func shortTestOnly() bool { return GinkgoLabelFilter() == e2e.ShortTestLabel diff --git a/test/e2e/suites/update-labels/suite_test.go b/test/e2e/suites/update-labels/suite_test.go index 159218d5..c98437af 100644 --- a/test/e2e/suites/update-labels/suite_test.go +++ b/test/e2e/suites/update-labels/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package update_labels import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -35,6 +39,7 @@ import ( "github.com/rancher/turtles/test/e2e" "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" + capiframework "sigs.k8s.io/cluster-api/test/framework" ) // Test suite flags. 
@@ -58,7 +63,8 @@ var ( ctx = context.Background() - setupClusterResult *testenv.SetupTestClusterResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy ) func init() { @@ -74,116 +80,155 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-import-gitops") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: 
preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, 
- E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, + }) + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: framework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{ - "cluster-api-operator.cluster-api.version": e2e.CAPIVersion, - "rancherTurtles.features.rancher-kubeconfigs.label": "true", // force to be true even if the default in the chart changes - }, - } - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - testenv.RestartRancher(ctx, testenv.RestartRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - RancherNamespace: e2e.RancherNamespace, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }) -}) - -var _ = AfterSuite(func() { - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, 
testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: 
framework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{ + "cluster-api-operator.cluster-api.version": e2e.CAPIVersion, + "rancherTurtles.features.rancher-kubeconfigs.label": "true", // force to be true even if the default in the chart changes + }, + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + testenv.RestartRancher(ctx, testenv.RestartRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + RancherNamespace: e2e.RancherNamespace, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + }) + + // encode the e2e config into the byte array. + var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(4)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + + configBytes, err := base64.StdEncoding.DecodeString(parts[2]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), 
"cluster proxy should not be nil") + + hostName = parts[3] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) diff --git a/test/e2e/suites/update-labels/update_labels_test.go b/test/e2e/suites/update-labels/update_labels_test.go index d66fde17..9a927bc3 100644 --- a/test/e2e/suites/update-labels/update_labels_test.go +++ b/test/e2e/suites/update-labels/update_labels_test.go @@ -39,7 +39,6 @@ import ( ) var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still work with CAPI 1.5.x and label renaming", Label(e2e.FullTestLabel), func() { - var ( specName = "updatelabels" rancherKubeconfig *turtlesframework.RancherGetClusterKubeconfigResult @@ -48,7 +47,7 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w ) BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) rancherKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult) @@ -73,11 +72,11 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w lookupResult := &turtlesframework.RancherLookupUserResult{} turtlesframework.RancherLookupUser(ctx, turtlesframework.RancherLookupUserInput{ Username: "admin", - ClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterProxy: bootstrapClusterProxy, }, lookupResult) turtlesframework.CreateSecret(ctx, turtlesframework.CreateSecretInput{ - Creator: setupClusterResult.BootstrapClusterProxy.GetClient(), + Creator: bootstrapClusterProxy.GetClient(), Name: credsSecretName, Namespace: 
"cattle-global-data", Type: corev1.SecretTypeOpaque, @@ -109,7 +108,7 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w } }) Expect(err).ToNot(HaveOccurred()) - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed apply Digital Ocean RKE config") + Expect(bootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed apply Digital Ocean RKE config") cluster, err := envsubst.Eval(string(e2e.V2ProvAzureCluster), func(s string) string { switch s { @@ -128,24 +127,24 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w } }) Expect(err).ToNot(HaveOccurred()) - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed apply Digital Ocean cluster config") + Expect(bootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed apply Digital Ocean cluster config") By("Waiting for the rancher cluster record to appear") rancherCluster = &provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{ Namespace: "fleet-default", Name: clusterName, }} - Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) By("Waiting for the rancher cluster to have a deployed agent") - Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue())) + Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue())) By("Waiting for the rancher cluster to be ready") - Eventually(komega.Object(rancherCluster), 
e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue())) + Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue())) By("Getting kubeconfig from Rancher for new cluster") turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), SecretName: fmt.Sprintf("%s-kubeconfig", rancherCluster.Name), Namespace: rancherCluster.Namespace, RancherServerURL: hostName, @@ -169,21 +168,21 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w }) AfterEach(func() { - err := testenv.CollectArtifacts(ctx, setupClusterResult.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) + err := testenv.CollectArtifacts(ctx, bootstrapClusterProxy.GetKubeconfigPath(), path.Join(artifactsFolder, bootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the bootstrap cluster: %v\n", err) } - err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+specName)) + err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(artifactsFolder, bootstrapClusterProxy.GetName(), clusterName+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err) } By("Deleting cluster from Rancher") - err = setupClusterResult.BootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster) + err = bootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster) Expect(err).NotTo(HaveOccurred(), "Failed to delete rancher cluster") 
By("Waiting for the rancher cluster record to be removed") - Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") + Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") }) }) diff --git a/test/e2e/suites/v2prov/suite_test.go b/test/e2e/suites/v2prov/suite_test.go index 30b868d7..cba36f13 100644 --- a/test/e2e/suites/v2prov/suite_test.go +++ b/test/e2e/suites/v2prov/suite_test.go @@ -20,10 +20,14 @@ limitations under the License. package v2prov import ( + "bytes" "context" + "encoding/base64" + "encoding/gob" "fmt" "path/filepath" "strconv" + "strings" "testing" . "github.com/onsi/ginkgo/v2" @@ -35,6 +39,7 @@ import ( "github.com/rancher/turtles/test/e2e" turtlesframework "github.com/rancher/turtles/test/framework" "github.com/rancher/turtles/test/testenv" + capiframework "sigs.k8s.io/cluster-api/test/framework" ) // Test suite flags. @@ -58,7 +63,8 @@ var ( artifactsFolder string - setupClusterResult *testenv.SetupTestClusterResult + setupClusterResult *testenv.SetupTestClusterResult + bootstrapClusterProxy capiframework.ClusterProxy ) func init() { @@ -74,115 +80,154 @@ func TestE2E(t *testing.T) { RunSpecs(t, "rancher-turtles-e2e-v2prov") } -var _ = BeforeSuite(func() { - By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) - Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") - e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) - e2e.ValidateE2EConfig(e2eConfig) - - artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) - - preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) - - By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) - clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) - - useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") - - setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ - UseExistingCluster: useExistingCluter, - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - Scheme: e2e.InitScheme(), - ArtifactFolder: artifactsFolder, - KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - CustomClusterProvider: preSetupOutput.CustomClusterProvider, - }) - - testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), - IngressType: preSetupOutput.IngressType, - CustomIngress: e2e.NginxIngress, - CustomIngressNamespace: e2e.NginxIngressNamespace, - CustomIngressDeployment: e2e.NginxIngressDeployment, - IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), - NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), - NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), - NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), - 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), - DefaultIngressClassPatch: e2e.IngressClassPatch, - }) - - rancherInput := testenv.DeployRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), - InstallCertManager: true, - CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), - CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), - CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), - RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), - RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), - RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), - RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), - Development: true, - RancherNamespace: e2e.RancherNamespace, - RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), - RancherFeatures: e2eConfig.GetVariable(e2e.RancherFeaturesVar), - RancherPatches: [][]byte{e2e.RancherSettingPatch}, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - Variables: e2eConfig.Variables, - } - - rancherHookResult := testenv.PreRancherInstallHook( - &testenv.PreRancherInstallHookInput{ - Ctx: ctx, - RancherInput: &rancherInput, - E2EConfig: e2eConfig, - SetupClusterResult: setupClusterResult, - PreSetupOutput: preSetupOutput, +var _ = SynchronizedBeforeSuite( + func() []byte { + By(fmt.Sprintf("Loading the e2e test configuration from %q", flagVals.ConfigPath)) + Expect(flagVals.ConfigPath).To(BeAnExistingFile(), "Invalid test suite argument. 
e2e.config should be an existing file.") + e2eConfig = e2e.LoadE2EConfig(flagVals.ConfigPath) + e2e.ValidateE2EConfig(e2eConfig) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + preSetupOutput := testenv.PreManagementClusterSetupHook(e2eConfig) + + By(fmt.Sprintf("Creating a clusterctl config into %q", artifactsFolder)) + clusterctlConfigPath = e2e.CreateClusterctlLocalRepository(ctx, e2eConfig, filepath.Join(artifactsFolder, "repository")) + + useExistingCluter, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.UseExistingClusterVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the USE_EXISTING_CLUSTER variable") + + setupClusterResult = testenv.SetupTestCluster(ctx, testenv.SetupTestClusterInput{ + UseExistingCluster: useExistingCluter, + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + Scheme: e2e.InitScheme(), + ArtifactFolder: artifactsFolder, + KubernetesVersion: e2eConfig.GetVariable(e2e.KubernetesManagementVersionVar), + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + CustomClusterProvider: preSetupOutput.CustomClusterProvider, + }) + + testenv.RancherDeployIngress(ctx, testenv.RancherDeployIngressInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher-ingress.yaml"), + IngressType: preSetupOutput.IngressType, + CustomIngress: e2e.NginxIngress, + CustomIngressNamespace: e2e.NginxIngressNamespace, + CustomIngressDeployment: e2e.NginxIngressDeployment, + IngressWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + NgrokApiKey: e2eConfig.GetVariable(e2e.NgrokApiKeyVar), + NgrokAuthToken: e2eConfig.GetVariable(e2e.NgrokAuthTokenVar), + NgrokPath: e2eConfig.GetVariable(e2e.NgrokPathVar), + NgrokRepoName: e2eConfig.GetVariable(e2e.NgrokRepoNameVar), + 
NgrokRepoURL: e2eConfig.GetVariable(e2e.NgrokUrlVar), + DefaultIngressClassPatch: e2e.IngressClassPatch, }) - hostName = rancherHookResult.HostName - - testenv.DeployRancher(ctx, rancherInput) - - rtInput := testenv.DeployRancherTurtlesInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), - TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), - CAPIProvidersYAML: e2e.CapiProviders, - Namespace: turtlesframework.DefaultRancherTurtlesNamespace, - Image: "ghcr.io/rancher/turtles-e2e", - Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), - WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), - AdditionalValues: map[string]string{}, - } - - testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) - - testenv.DeployRancherTurtles(ctx, rtInput) - - testenv.RestartRancher(ctx, testenv.RestartRancherInput{ - BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, - RancherNamespace: e2e.RancherNamespace, - RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), - }) -}) - -var _ = AfterSuite(func() { - skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) - Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") - - testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ - SetupTestClusterResult: *setupClusterResult, - SkipCleanup: skipCleanup, - ArtifactFolder: artifactsFolder, - }) -}) + rancherInput := testenv.DeployRancherInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + HelmExtraValuesPath: filepath.Join(e2eConfig.GetVariable(e2e.HelmExtraValuesFolderVar), "deploy-rancher.yaml"), + InstallCertManager: true, + CertManagerChartPath: e2eConfig.GetVariable(e2e.CertManagerPathVar), + 
CertManagerUrl: e2eConfig.GetVariable(e2e.CertManagerUrlVar), + CertManagerRepoName: e2eConfig.GetVariable(e2e.CertManagerRepoNameVar), + RancherChartRepoName: e2eConfig.GetVariable(e2e.RancherRepoNameVar), + RancherChartURL: e2eConfig.GetVariable(e2e.RancherUrlVar), + RancherChartPath: e2eConfig.GetVariable(e2e.RancherPathVar), + RancherVersion: e2eConfig.GetVariable(e2e.RancherVersionVar), + Development: true, + RancherNamespace: e2e.RancherNamespace, + RancherPassword: e2eConfig.GetVariable(e2e.RancherPasswordVar), + RancherFeatures: e2eConfig.GetVariable(e2e.RancherFeaturesVar), + RancherPatches: [][]byte{e2e.RancherSettingPatch}, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + ControllerWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + Variables: e2eConfig.Variables, + } + + rancherHookResult := testenv.PreRancherInstallHook( + &testenv.PreRancherInstallHookInput{ + Ctx: ctx, + RancherInput: &rancherInput, + E2EConfig: e2eConfig, + SetupClusterResult: setupClusterResult, + PreSetupOutput: preSetupOutput, + }) + + testenv.DeployRancher(ctx, rancherInput) + + rtInput := testenv.DeployRancherTurtlesInput{ + BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + HelmBinaryPath: e2eConfig.GetVariable(e2e.HelmBinaryPathVar), + TurtlesChartPath: e2eConfig.GetVariable(e2e.TurtlesPathVar), + CAPIProvidersYAML: e2e.CapiProviders, + Namespace: turtlesframework.DefaultRancherTurtlesNamespace, + Image: "ghcr.io/rancher/turtles-e2e", + Tag: e2eConfig.GetVariable(e2e.TurtlesVersionVar), + WaitDeploymentsReadyInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-controllers"), + AdditionalValues: map[string]string{}, + } + + testenv.PreRancherTurtlesInstallHook(&rtInput, e2eConfig) + + testenv.DeployRancherTurtles(ctx, rtInput) + + testenv.RestartRancher(ctx, testenv.RestartRancherInput{ + 
BootstrapClusterProxy: setupClusterResult.BootstrapClusterProxy, + RancherNamespace: e2e.RancherNamespace, + RancherWaitInterval: e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher"), + }) + + // encode the e2e config into the byte array. + var configBuf bytes.Buffer + enc := gob.NewEncoder(&configBuf) + Expect(enc.Encode(e2eConfig)).To(Succeed()) + configStr := base64.StdEncoding.EncodeToString(configBuf.Bytes()) + + return []byte( + strings.Join([]string{ + setupClusterResult.ClusterName, + setupClusterResult.KubeconfigPath, + configStr, + rancherHookResult.HostName, + }, ","), + ) + }, + func(sharedData []byte) { + parts := strings.Split(string(sharedData), ",") + Expect(parts).To(HaveLen(4)) + + clusterName := parts[0] + kubeconfigPath := parts[1] + + configBytes, err := base64.StdEncoding.DecodeString(parts[2]) + Expect(err).NotTo(HaveOccurred()) + buf := bytes.NewBuffer(configBytes) + dec := gob.NewDecoder(buf) + Expect(dec.Decode(&e2eConfig)).To(Succeed()) + + artifactsFolder = e2eConfig.GetVariable(e2e.ArtifactsFolderVar) + + bootstrapClusterProxy = capiframework.NewClusterProxy(string(clusterName), string(kubeconfigPath), e2e.InitScheme(), capiframework.WithMachineLogCollector(capiframework.DockerLogCollector{})) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "cluster proxy should not be nil") + + hostName = parts[3] + }, +) + +var _ = SynchronizedAfterSuite( + func() { + }, + func() { + skipCleanup, err := strconv.ParseBool(e2eConfig.GetVariable(e2e.SkipResourceCleanupVar)) + Expect(err).ToNot(HaveOccurred(), "Failed to parse the SKIP_RESOURCE_CLEANUP variable") + + testenv.CleanupTestCluster(ctx, testenv.CleanupTestClusterInput{ + SetupTestClusterResult: *setupClusterResult, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactsFolder, + }) + }, +) diff --git a/test/e2e/suites/v2prov/v2prov_test.go b/test/e2e/suites/v2prov/v2prov_test.go index a5631efe..add1603a 100644 --- a/test/e2e/suites/v2prov/v2prov_test.go +++ 
b/test/e2e/suites/v2prov/v2prov_test.go @@ -48,7 +48,7 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w ) BeforeEach(func() { - komega.SetClient(setupClusterResult.BootstrapClusterProxy.GetClient()) + komega.SetClient(bootstrapClusterProxy.GetClient()) komega.SetContext(ctx) rancherKubeconfig = new(turtlesframework.RancherGetClusterKubeconfigResult) @@ -73,11 +73,11 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w lookupResult := &turtlesframework.RancherLookupUserResult{} turtlesframework.RancherLookupUser(ctx, turtlesframework.RancherLookupUserInput{ Username: "admin", - ClusterProxy: setupClusterResult.BootstrapClusterProxy, + ClusterProxy: bootstrapClusterProxy, }, lookupResult) turtlesframework.CreateSecret(ctx, turtlesframework.CreateSecretInput{ - Creator: setupClusterResult.BootstrapClusterProxy.GetClient(), + Creator: bootstrapClusterProxy.GetClient(), Name: credsSecretName, Namespace: "cattle-global-data", Type: corev1.SecretTypeOpaque, @@ -109,7 +109,7 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w } }) Expect(err).ToNot(HaveOccurred()) - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed apply Digital Ocean RKE config") + Expect(bootstrapClusterProxy.Apply(ctx, []byte(rkeConfig))).To(Succeed(), "Failed apply Digital Ocean RKE config") cluster, err := envsubst.Eval(string(e2e.V2ProvAzureCluster), func(s string) string { switch s { @@ -128,24 +128,24 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w } }) Expect(err).ToNot(HaveOccurred()) - Expect(setupClusterResult.BootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed apply Digital Ocean cluster config") + Expect(bootstrapClusterProxy.Apply(ctx, []byte(cluster))).To(Succeed(), "Failed apply Digital Ocean cluster config") By("Waiting for the rancher cluster record to appear") rancherCluster = 
&provisioningv1.Cluster{ObjectMeta: metav1.ObjectMeta{ Namespace: "fleet-default", Name: clusterName, }} - Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) + Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-rancher")...).Should(Succeed()) By("Waiting for the rancher cluster to have a deployed agent") - Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue())) + Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-v2prov-create")...).Should(HaveField("Status.AgentDeployed", BeTrue())) By("Waiting for the rancher cluster to be ready") - Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue())) + Eventually(komega.Object(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-rancher")...).Should(HaveField("Status.Ready", BeTrue())) By("Waiting for the CAPI cluster to be connectable using Rancher kubeconfig") turtlesframework.RancherGetClusterKubeconfig(ctx, turtlesframework.RancherGetClusterKubeconfigInput{ - Getter: setupClusterResult.BootstrapClusterProxy.GetClient(), + Getter: bootstrapClusterProxy.GetClient(), SecretName: fmt.Sprintf("%s-kubeconfig", rancherCluster.Name), Namespace: rancherCluster.Namespace, RancherServerURL: hostName, @@ -168,21 +168,21 @@ var _ = Describe("[v2prov] [Azure] Creating a cluster with v2prov should still w }) AfterEach(func() { - err := testenv.CollectArtifacts(ctx, setupClusterResult.BootstrapClusterProxy.GetKubeconfigPath(), path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) + err := 
testenv.CollectArtifacts(ctx, bootstrapClusterProxy.GetKubeconfigPath(), path.Join(artifactsFolder, bootstrapClusterProxy.GetName(), clusterName+"bootstrap"+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the bootstrap cluster: %v\n", err) } - err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(artifactsFolder, setupClusterResult.BootstrapClusterProxy.GetName(), clusterName+specName)) + err = testenv.CollectArtifacts(ctx, rancherKubeconfig.TempFilePath, path.Join(artifactsFolder, bootstrapClusterProxy.GetName(), clusterName+specName)) if err != nil { fmt.Printf("Failed to collect artifacts for the child cluster: %v\n", err) } By("Deleting cluster from Rancher") - err = setupClusterResult.BootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster) + err = bootstrapClusterProxy.GetClient().Delete(ctx, rancherCluster) Expect(err).NotTo(HaveOccurred(), "Failed to delete rancher cluster") By("Waiting for the rancher cluster record to be removed") - Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(setupClusterResult.BootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") + Eventually(komega.Get(rancherCluster), e2eConfig.GetIntervals(bootstrapClusterProxy.GetName(), "wait-azure-delete")...).Should(MatchError(ContainSubstring("not found")), "Rancher cluster should be deleted") }) }) diff --git a/test/testenv/setupcluster.go b/test/testenv/setupcluster.go index 1640f677..9619d5e0 100644 --- a/test/testenv/setupcluster.go +++ b/test/testenv/setupcluster.go @@ -76,6 +76,9 @@ type SetupTestClusterResult struct { // IsolatedHostName is the hostname to use for Rancher in isolated mode IsolatedHostName string + + ClusterName string + KubeconfigPath string } // SetupTestCluster sets up a test cluster for running tests. 
@@ -92,8 +95,7 @@ func SetupTestCluster(ctx context.Context, input SetupTestClusterInput) *SetupTe result := &SetupTestClusterResult{} By("Setting up the bootstrap cluster") - result.BootstrapClusterProvider, result.BootstrapClusterProxy = setupCluster( - ctx, input.E2EConfig, input.Scheme, clusterName, input.UseExistingCluster, input.KubernetesVersion, input.CustomClusterProvider) + result.setupCluster(ctx, input.E2EConfig, input.Scheme, clusterName, input.UseExistingCluster, input.KubernetesVersion, input.CustomClusterProvider) if input.UseExistingCluster { return result @@ -109,7 +111,7 @@ func SetupTestCluster(ctx context.Context, input SetupTestClusterInput) *SetupTe return result } -func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *runtime.Scheme, clusterName string, useExistingCluster bool, kubernetesVersion string, customClusterProvider CustomClusterProvider) (bootstrap.ClusterProvider, framework.ClusterProxy) { +func (r *SetupTestClusterResult) setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *runtime.Scheme, clusterName string, useExistingCluster bool, kubernetesVersion string, customClusterProvider CustomClusterProvider) { var clusterProvider bootstrap.ClusterProvider kubeconfigPath := "" @@ -134,7 +136,10 @@ func setupCluster(ctx context.Context, config *clusterctl.E2EConfig, scheme *run proxy := framework.NewClusterProxy(clusterName, kubeconfigPath, scheme, framework.WithMachineLogCollector(framework.DockerLogCollector{})) Expect(proxy).ToNot(BeNil(), "Cluster proxy should not be nil") - return clusterProvider, proxy + r.ClusterName = clusterName + r.BootstrapClusterProxy = proxy + r.BootstrapClusterProvider = clusterProvider + r.KubeconfigPath = kubeconfigPath } // getInternalClusterHostname gets the internal by setting it to the IP of the first and only node in the boostrap cluster. Labels the node with