diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 55084a2d..b5f02385 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -40,8 +40,7 @@ jobs: uses: engineerd/setup-kind@v0.5.0 with: version: "v0.20.0" - image: kindest/node:v1.28.0 - config: hack/kind-config.yaml + image: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 - name: Testing kind cluster set-up run: | diff --git a/.gitignore b/.gitignore index 84207959..0008354c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ .idea/ examples/**/vendor/ examples/**/test/ -test/ +/test .devspace/ .DS_Store \ No newline at end of file diff --git a/e2e/plugin/test.go b/e2e/plugin/plugin.go similarity index 100% rename from e2e/plugin/test.go rename to e2e/plugin/plugin.go diff --git a/e2e/test_plugin/syncers/car.go b/e2e/test_plugin/syncers/car.go index 5603bf2b..e3c77425 100644 --- a/e2e/test_plugin/syncers/car.go +++ b/e2e/test_plugin/syncers/car.go @@ -57,23 +57,16 @@ func (s *carSyncer) translateUpdate(ctx context.Context, pObj, vObj *examplev1.C // check annotations & labels changed, updatedAnnotations, updatedLabels := s.TranslateMetadataUpdate(ctx, vObj, pObj) if changed { - updated = newIfNil(updated, pObj) + updated = translator.NewIfNil(updated, pObj) updated.Labels = updatedLabels updated.Annotations = updatedAnnotations } // check spec if !equality.Semantic.DeepEqual(vObj.Spec, pObj.Spec) { - updated = newIfNil(updated, pObj) + updated = translator.NewIfNil(updated, pObj) updated.Spec = vObj.Spec } return updated } - -func newIfNil(updated *examplev1.Car, pObj *examplev1.Car) *examplev1.Car { - if updated == nil { - return pObj.DeepCopy() - } - return updated -} diff --git a/examples/bootstrap-with-deployment/README.md b/examples/bootstrap-with-deployment/README.md index 4d304254..3a215577 100644 --- a/examples/bootstrap-with-deployment/README.md +++ b/examples/bootstrap-with-deployment/README.md @@ -26,6 +26,9 @@ kubectl get po -n my-vcluster ## Building the Plugin To just build the plugin image and push it to the registry, run: ``` +# Make sure vendor is updated +go mod vendor + # Build docker build . -t my-repo/my-plugin:0.0.1 @@ -45,7 +48,6 @@ General vcluster plugin project structure: ├── devspace.yaml # Development environment definition ├── devspace_start.sh # Development entrypoint script ├── Dockerfile # Production Dockerfile -├── Dockerfile.dev # Development Dockerfile ├── main.go # Go Entrypoint ├── plugin.yaml # Plugin Helm Values ├── syncers/ # Plugin Syncers @@ -59,8 +61,6 @@ Before starting to develop, make sure you have installed the following tools on - [vcluster CLI](https://www.vcluster.com/docs/getting-started/setup) v0.6.0 or higher - [DevSpace](https://devspace.sh/cli/docs/quickstart), which is used to spin up a development environment -If you want to develop within a remote Kubernetes cluster (as opposed to docker-desktop or minikube), make sure to exchange `PLUGIN_IMAGE` in the `devspace.yaml` with a valid registry path you can push to. - After successfully setting up the tools, start the development environment with: ``` devspace dev -n vcluster @@ -68,16 +68,7 @@ devspace dev -n vcluster After a while a terminal should show up with additional instructions. Enter the following command to start the plugin: ``` -go run -mod vendor main.go -``` - -The output should look something like this: -``` -I0124 11:20:14.702799 4185 logr.go:249] plugin: Try creating context... 
-I0124 11:20:14.730044 4185 logr.go:249] plugin: Waiting for vcluster to become leader... -I0124 11:20:14.731097 4185 logr.go:249] plugin: Starting syncers... -[...] -I0124 11:20:15.957331 4185 logr.go:249] plugin: Successfully started plugin. +go build -mod vendor -o plugin main.go && /vcluster/syncer start ``` You can now change a file locally in your IDE and then restart the command in the terminal to apply the changes to the plugin. diff --git a/examples/crd-sync/README.md b/examples/crd-sync/README.md index 321a5cae..a761e130 100644 --- a/examples/crd-sync/README.md +++ b/examples/crd-sync/README.md @@ -48,7 +48,6 @@ General vcluster plugin project structure: ├── devspace.yaml # Development environment definition ├── devspace_start.sh # Development entrypoint script ├── Dockerfile # Production Dockerfile -├── Dockerfile.dev # Development Dockerfile ├── main.go # Go Entrypoint ├── plugin.yaml # Plugin Helm Values ├── syncers/ # Plugin Syncers @@ -62,8 +61,6 @@ Before starting to develop, make sure you have installed the following tools on - [vcluster CLI](https://www.vcluster.com/docs/getting-started/setup) v0.6.0 or higher - [DevSpace](https://devspace.sh/cli/docs/quickstart), which is used to spin up a development environment -If you want to develop within a remote Kubernetes cluster (as opposed to docker-desktop or minikube), make sure to exchange `PLUGIN_IMAGE` in the `devspace.yaml` with a valid registry path you can push to. - After successfully setting up the tools, start the development environment with: ``` devspace dev -n vcluster @@ -71,16 +68,7 @@ devspace dev -n vcluster After a while a terminal should show up with additional instructions. Enter the following command to start the plugin: ``` -go run -mod vendor main.go -``` - -The output should look something like this: -``` -I0124 11:20:14.702799 4185 logr.go:249] plugin: Try creating context... -I0124 11:20:14.730044 4185 logr.go:249] plugin: Waiting for vcluster to become leader... -I0124 11:20:14.731097 4185 logr.go:249] plugin: Starting syncers... -[...] -I0124 11:20:15.957331 4185 logr.go:249] plugin: Successfully started plugin. +go build -mod vendor -o plugin main.go && /vcluster/syncer start ``` You can now change a file locally in your IDE and then restart the command in the terminal to apply the changes to the plugin. diff --git a/examples/hooks/README.md b/examples/hooks/README.md index 12168b5b..8ab8c501 100644 --- a/examples/hooks/README.md +++ b/examples/hooks/README.md @@ -70,7 +70,6 @@ General vcluster plugin project structure: ├── devspace.yaml # Development environment definition ├── devspace_start.sh # Development entrypoint script ├── Dockerfile # Production Dockerfile -├── Dockerfile.dev # Development Dockerfile ├── main.go # Go Entrypoint ├── plugin.yaml # Plugin Helm Values ├── syncers/ # Plugin Syncers @@ -84,8 +83,6 @@ Before starting to develop, make sure you have installed the following tools on - [vcluster CLI](https://www.vcluster.com/docs/getting-started/setup) v0.6.0 or higher - [DevSpace](https://devspace.sh/cli/docs/quickstart), which is used to spin up a development environment -If you want to develop within a remote Kubernetes cluster (as opposed to docker-desktop or minikube), make sure to exchange `PLUGIN_IMAGE` in the `devspace.yaml` with a valid registry path you can push to. 
- After successfully setting up the tools, start the development environment with: ``` devspace dev -n vcluster @@ -93,16 +90,7 @@ devspace dev -n vcluster After a while a terminal should show up with additional instructions. Enter the following command to start the plugin: ``` -go run -mod vendor main.go -``` - -The output should look something like this: -``` -I0124 11:20:14.702799 4185 logr.go:249] plugin: Try creating context... -I0124 11:20:14.730044 4185 logr.go:249] plugin: Waiting for vcluster to become leader... -I0124 11:20:14.731097 4185 logr.go:249] plugin: Starting syncers... -[...] -I0124 11:20:15.957331 4185 logr.go:249] plugin: Successfully started plugin. +go build -mod vendor -o plugin main.go && /vcluster/syncer start ``` You can now change a file locally in your IDE and then restart the command in the terminal to apply the changes to the plugin. diff --git a/examples/pull-secret-sync/README.md b/examples/pull-secret-sync/README.md index 22cdf02b..1b16f358 100644 --- a/examples/pull-secret-sync/README.md +++ b/examples/pull-secret-sync/README.md @@ -49,7 +49,6 @@ General vcluster plugin project structure: ├── devspace.yaml # Development environment definition ├── devspace_start.sh # Development entrypoint script ├── Dockerfile # Production Dockerfile -├── Dockerfile.dev # Development Dockerfile ├── main.go # Go Entrypoint ├── plugin.yaml # Plugin Helm Values ├── syncers/ # Plugin Syncers @@ -63,8 +62,6 @@ Before starting to develop, make sure you have installed the following tools on - [vcluster CLI](https://www.vcluster.com/docs/getting-started/setup) v0.6.0 or higher - [DevSpace](https://devspace.sh/cli/docs/quickstart), which is used to spin up a development environment -If you want to develop within a remote Kubernetes cluster (as opposed to docker-desktop or minikube), make sure to exchange `PLUGIN_IMAGE` in the `devspace.yaml` with a valid registry path you can push to. - After successfully setting up the tools, start the development environment with: ``` devspace dev -n vcluster @@ -72,16 +69,7 @@ devspace dev -n vcluster After a while a terminal should show up with additional instructions. Enter the following command to start the plugin: ``` -go run -mod vendor main.go -``` - -The output should look something like this: -``` -I0124 11:20:14.702799 4185 logr.go:249] plugin: Try creating context... -I0124 11:20:14.730044 4185 logr.go:249] plugin: Waiting for vcluster to become leader... -I0124 11:20:14.731097 4185 logr.go:249] plugin: Starting syncers... -[...] -I0124 11:20:15.957331 4185 logr.go:249] plugin: Successfully started plugin. +go build -mod vendor -o plugin main.go && /vcluster/syncer start ``` You can now change a file locally in your IDE and then restart the command in the terminal to apply the changes to the plugin. 
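All four READMEs above build the same kind of Go entrypoint (the `main.go # Go Entrypoint` in the project trees). As a minimal sketch of what such an entrypoint looks like with the SDK API used throughout this diff (`plugin.MustInit`, `plugin.MustRegister`, `plugin.MustStart`); the module path and the `NewMySyncer` constructor are hypothetical placeholders:

```go
package main

import (
	"github.com/loft-sh/vcluster-sdk/plugin"

	// hypothetical module path for your own syncers package
	"github.com/my-org/my-plugin/syncers"
)

func main() {
	// initialize the plugin and obtain the register context
	ctx := plugin.MustInit()

	// register custom syncers before the plugin starts
	plugin.MustRegister(syncers.NewMySyncer(ctx))

	// block until the plugin is stopped
	plugin.MustStart()
}
```

The pull-secret-sync changes below extend exactly this shape with config parsing via the new `plugin.UnmarshalConfig`.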
diff --git a/examples/pull-secret-sync/devspace.yaml b/examples/pull-secret-sync/devspace.yaml
index 8ba5fdda..bfe2b3b9 100644
--- a/examples/pull-secret-sync/devspace.yaml
+++ b/examples/pull-secret-sync/devspace.yaml
@@ -19,6 +19,11 @@ deployments:
         repo: https://charts.loft.sh
         version: v0.19.0-alpha.3
       values:
+        plugin:
+          pull-secret-sync:
+            version: v2
+            config:
+              destinationNamespace: "test"
         serviceAccount:
           create: false
           name: default
diff --git a/examples/pull-secret-sync/main.go b/examples/pull-secret-sync/main.go
index 8c87feb2..53928f1b 100644
--- a/examples/pull-secret-sync/main.go
+++ b/examples/pull-secret-sync/main.go
@@ -3,10 +3,29 @@ package main
 import (
 	"github.com/loft-sh/vcluster-pull-secret-sync/syncers"
 	"github.com/loft-sh/vcluster-sdk/plugin"
+	"k8s.io/klog/v2"
 )
 
+type PluginConfig struct {
+	DestinationNamespace string `json:"destinationNamespace,omitempty"`
+}
+
 func main() {
+	// init plugin
 	ctx := plugin.MustInit()
-	plugin.MustRegister(syncers.NewPullSecretSyncer(ctx, "todo"))
+
+	// parse plugin config
+	pluginConfig := &PluginConfig{}
+	err := plugin.UnmarshalConfig(pluginConfig)
+	if err != nil {
+		klog.Fatalf("Error parsing plugin config: %v", err)
+	} else if pluginConfig.DestinationNamespace == "" {
+		klog.Fatal("destinationNamespace is empty")
+	}
+
+	// register syncer
+	plugin.MustRegister(syncers.NewPullSecretSyncer(ctx, pluginConfig.DestinationNamespace))
+
+	// start plugin
 	plugin.MustStart()
 }
diff --git a/examples/pull-secret-sync/plugin.yaml b/examples/pull-secret-sync/plugin.yaml
index eae47087..04911885 100644
--- a/examples/pull-secret-sync/plugin.yaml
+++ b/examples/pull-secret-sync/plugin.yaml
@@ -4,4 +4,6 @@ plugin:
   pull-secret-sync:
     version: v2
     image: ghcr.io/loft-sh/vcluster-example-pull-sycret-sync:v2
-    imagePullPolicy: IfNotPresent
\ No newline at end of file
+    imagePullPolicy: IfNotPresent
+    config:
+      destinationNamespace: "test"
\ No newline at end of file
diff --git a/examples/pull-secret-sync/syncers/pull-secret-sync.go b/examples/pull-secret-sync/syncers/pull-secret-sync.go
index a6be5759..abdeec5e 100644
--- a/examples/pull-secret-sync/syncers/pull-secret-sync.go
+++ b/examples/pull-secret-sync/syncers/pull-secret-sync.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 
 	synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context"
+	"github.com/loft-sh/vcluster/pkg/controllers/syncer/translator"
 	synctypes "github.com/loft-sh/vcluster/pkg/types"
 	"github.com/loft-sh/vcluster/pkg/util/translate"
 	corev1 "k8s.io/api/core/v1"
@@ -65,8 +66,7 @@ func (s *pullSecretSyncer) SyncUp(ctx *synccontext.SyncContext, pObj client.Obje
 	if pSecret.Type != corev1.SecretTypeDockerConfigJson {
 		// ignore secrets that are not of "pull secret" type
 		return ctrl.Result{}, nil
-	}
-	if pSecret.GetLabels()[translate.MarkerLabel] != "" {
+	} else if pSecret.GetLabels()[translate.MarkerLabel] != "" {
 		// ignore Secrets synced to the host by the vcluster
 		return ctrl.Result{}, nil
 	}
@@ -183,7 +183,7 @@ func (s *pullSecretSyncer) translateUpdateUp(pObj, vObj *corev1.Secret) *corev1.
 	// sync annotations
 	// we sync all of them from the host and remove any added in the vcluster
 	if !equality.Semantic.DeepEqual(vObj.GetAnnotations(), pObj.GetAnnotations()) {
-		updated = newIfNil(updated, vObj)
+		updated = translator.NewIfNil(updated, vObj)
 		updated.Annotations = pObj.GetAnnotations()
 	}
 
@@ -197,7 +197,7 @@ func (s *pullSecretSyncer) translateUpdateUp(pObj, vObj *corev1.Secret) *corev1.
expectedLabels[k] = v } if !equality.Semantic.DeepEqual(vObj.GetLabels(), expectedLabels) { - updated = newIfNil(updated, vObj) + updated = translator.NewIfNil(updated, vObj) updated.Labels = expectedLabels } @@ -206,16 +206,9 @@ func (s *pullSecretSyncer) translateUpdateUp(pObj, vObj *corev1.Secret) *corev1. // check data if !equality.Semantic.DeepEqual(vObj.Data, pObj.Data) { - updated = newIfNil(updated, vObj) + updated = translator.NewIfNil(updated, vObj) updated.Data = pObj.Data } return updated } - -func newIfNil(updated *corev1.Secret, pObj *corev1.Secret) *corev1.Secret { - if updated == nil { - return pObj.DeepCopy() - } - return updated -} diff --git a/go.mod b/go.mod index 5811cce7..46b263cf 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/loft-sh/vcluster-sdk go 1.21.5 require ( + github.com/ghodss/yaml v1.0.0 github.com/hashicorp/go-plugin v1.6.0 github.com/loft-sh/log v0.0.0-20230824104949-bd516c25712a github.com/loft-sh/vcluster v0.19.0-alpha.3 @@ -51,7 +52,6 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect diff --git a/hack/kind-config.yaml b/hack/kind-config.yaml deleted file mode 100644 index de622139..00000000 --- a/hack/kind-config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -networking: - disableDefaultCNI: true # disable kindnet - podSubnet: 192.168.0.0/16 # set to Calico's default subnet diff --git a/plugin/manager.go b/plugin/manager.go index 95ffe4e5..58d35c23 100644 --- a/plugin/manager.go +++ b/plugin/manager.go @@ -7,6 +7,7 @@ import ( "os" "sync" + "github.com/ghodss/yaml" "github.com/loft-sh/log/logr" "github.com/loft-sh/vcluster/pkg/controllers/syncer" synccontext "github.com/loft-sh/vcluster/pkg/controllers/syncer/context" @@ -43,6 +44,15 @@ type manager struct { syncers []syncertypes.Base } +func (m *manager) UnmarshalConfig(into interface{}) error { + err := yaml.Unmarshal([]byte(os.Getenv(v2.PluginConfigEnv)), into) + if err != nil { + return fmt.Errorf("unmarshal plugin config: %w", err) + } + + return nil +} + func (m *manager) Init() (*synccontext.RegisterContext, error) { return m.InitWithOptions(Options{}) } diff --git a/plugin/plugin.go b/plugin/plugin.go index a36e3636..bd06870d 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -47,3 +47,7 @@ func MustStart() { func Start() error { return defaultManager.Start() } + +func UnmarshalConfig(into interface{}) error { + return defaultManager.UnmarshalConfig(into) +} diff --git a/plugin/types.go b/plugin/types.go index a306bf2c..5cb4000c 100644 --- a/plugin/types.go +++ b/plugin/types.go @@ -35,6 +35,10 @@ type Manager interface { // the functionality if the current vcluster pod is the current leader and // will stop if the pod will lose leader election. Start() error + + // UnmarshalConfig retrieves the plugin config from environment and parses it into + // the given object. 
+ UnmarshalConfig(into interface{}) error } // ClientHook tells the sdk that this action watches on certain vcluster requests and wants diff --git a/vendor/github.com/loft-sh/vcluster/test/framework/framework.go b/vendor/github.com/loft-sh/vcluster/test/framework/framework.go new file mode 100644 index 00000000..e9a540a5 --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/test/framework/framework.go @@ -0,0 +1,231 @@ +package framework + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/loft-sh/log" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd" + "github.com/loft-sh/vcluster/cmd/vclusterctl/flags" + logutil "github.com/loft-sh/vcluster/pkg/util/log" + "github.com/loft-sh/vcluster/pkg/util/translate" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + PollTimeout = time.Minute + DefaultVclusterName = "vcluster" + DefaultVclusterNamespace = "vcluster" + DefaultClientTimeout = 32 * time.Second // the default in client-go is 32 +) + +var DefaultFramework = &Framework{} + +type Framework struct { + // The context to use for testing + Context context.Context + + // VclusterName is the name of the vcluster instance which we are testing + VclusterName string + + // VclusterNamespace is the namespace in host cluster of the current + // vcluster instance which we are testing + VclusterNamespace string + + // The suffix to append to the synced resources in the host namespace + Suffix string + + // HostConfig is the kubernetes rest config of the + // host kubernetes cluster were we are testing in + HostConfig *rest.Config + + // HostClient is the kubernetes client of the current + // host kubernetes cluster were we are testing in + HostClient *kubernetes.Clientset + + // HostCRClient is the controller runtime client of the current + // host kubernetes cluster were we are testing in + HostCRClient client.Client + + // VclusterConfig is the kubernetes rest config of the current + // vcluster instance which we are testing + VclusterConfig *rest.Config + + // VclusterClient is the kubernetes client of the current + // vcluster instance which we are testing + VclusterClient *kubernetes.Clientset + + // VclusterCRClient is the controller runtime client of the current + // vcluster instance which we are testing + VclusterCRClient client.Client + + // VclusterKubeconfigFile is a file containing kube config + // of the current vcluster instance which we are testing. + // This file shall be deleted in the end of the test suite execution. 
+ VclusterKubeconfigFile *os.File + + // Scheme is the global scheme to use + Scheme *runtime.Scheme + + // Log is the logger that should be used + Log log.Logger + + // ClientTimeout value used in the clients + ClientTimeout time.Duration + + // MultiNamespaceMode denotes whether the multi namespace mode is enabled for the virtualcluster + MultiNamespaceMode bool +} + +func CreateFramework(ctx context.Context, scheme *runtime.Scheme) error { + // setup loggers + ctrl.SetLogger(logutil.NewLog(0)) + l := log.GetInstance() + + name := os.Getenv("VCLUSTER_NAME") + if name == "" { + name = DefaultVclusterName + } + ns := os.Getenv("VCLUSTER_NAMESPACE") + if ns == "" { + ns = DefaultVclusterNamespace + } + timeoutEnvVar := os.Getenv("VCLUSTER_CLIENT_TIMEOUT") + var timeout time.Duration + timeoutInt, err := strconv.Atoi(timeoutEnvVar) + if err == nil { + timeout = time.Duration(timeoutInt) * time.Second + } else { + timeout = DefaultClientTimeout + } + + suffix := os.Getenv("VCLUSTER_SUFFIX") + if suffix == "" { + //TODO: maybe implement some autodiscovery of the suffix value that would work with dev and prod setups + suffix = "vcluster" + } + translate.VClusterName = suffix + + var multiNamespaceMode bool + if os.Getenv("MULTINAMESPACE_MODE") == "true" { + translate.Default = translate.NewMultiNamespaceTranslator(ns) + multiNamespaceMode = true + } else { + translate.Default = translate.NewSingleNamespaceTranslator(ns) + } + + l.Infof("Testing Vcluster named: %s in namespace: %s", name, ns) + + hostConfig, err := ctrl.GetConfig() + if err != nil { + return err + } + hostConfig.Timeout = timeout + + hostClient, err := kubernetes.NewForConfig(hostConfig) + if err != nil { + return err + } + + hostCRClient, err := client.New(hostConfig, client.Options{Scheme: scheme}) + if err != nil { + return err + } + + // run port forwarder and retrieve kubeconfig for the vcluster + vKubeconfigFile, err := os.CreateTemp(os.TempDir(), "vcluster_e2e_kubeconfig_") + if err != nil { + return fmt.Errorf("could not create a temporary file: %w", err) + } + // vKubeconfigFile removal is done in the Framework.Cleanup() which gets called in ginkgo's AfterSuite() + + connectCmd := cmd.ConnectCmd{ + Log: l, + GlobalFlags: &flags.GlobalFlags{ + Namespace: ns, + Debug: true, + }, + KubeConfig: vKubeconfigFile.Name(), + LocalPort: 14550, // choosing a port that usually should be unused + } + err = connectCmd.Connect(ctx, nil, name, nil) + if err != nil { + l.Fatalf("failed to connect to the vcluster: %v", err) + } + + var vclusterConfig *rest.Config + var vclusterClient *kubernetes.Clientset + var vclusterCRClient client.Client + + err = wait.PollUntilContextTimeout(ctx, time.Second, time.Minute*5, false, func(ctx context.Context) (bool, error) { + output, err := os.ReadFile(vKubeconfigFile.Name()) + if err != nil { + return false, nil + } + + // try to parse config from file with retry because the file content might not be written + vclusterConfig, err = clientcmd.RESTConfigFromKubeConfig(output) + if err != nil { + return false, err + } + vclusterConfig.Timeout = timeout + + // create kubernetes client using the config retry in case port forwarding is not ready yet + vclusterClient, err = kubernetes.NewForConfig(vclusterConfig) + if err != nil { + return false, err + } + + vclusterCRClient, err = client.New(vclusterConfig, client.Options{Scheme: scheme}) + if err != nil { + return false, err + } + + // try to use the client with retry in case port forwarding is not ready yet + _, err = 
vclusterClient.CoreV1().ServiceAccounts("default").Get(ctx, "default", metav1.GetOptions{}) + if err != nil { + return false, err + } + + return true, nil + }) + if err != nil { + return err + } + + // create the framework + DefaultFramework = &Framework{ + Context: ctx, + VclusterName: name, + VclusterNamespace: ns, + Suffix: suffix, + HostConfig: hostConfig, + HostClient: hostClient, + HostCRClient: hostCRClient, + VclusterConfig: vclusterConfig, + VclusterClient: vclusterClient, + VclusterCRClient: vclusterCRClient, + VclusterKubeconfigFile: vKubeconfigFile, + Scheme: scheme, + Log: l, + ClientTimeout: timeout, + MultiNamespaceMode: multiNamespaceMode, + } + + l.Done("Framework successfully initialized") + return nil +} + +func (f *Framework) Cleanup() error { + return os.Remove(f.VclusterKubeconfigFile.Name()) +} diff --git a/vendor/github.com/loft-sh/vcluster/test/framework/helper.go b/vendor/github.com/loft-sh/vcluster/test/framework/helper.go new file mode 100644 index 00000000..b154f9fa --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/test/framework/helper.go @@ -0,0 +1,63 @@ +package framework + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// ExpectEqual expects the specified two are the same, otherwise an exception raises +func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) +} + +// ExpectNotEqual expects the specified two are not the same, otherwise an exception raises +func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...) +} + +// ExpectError expects an error happens, otherwise an exception raises +func ExpectError(err error, explain ...interface{}) { + gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...) +} + +func ExpectNotEmpty(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).ToNot(gomega.BeEmpty(), explain...) +} + +// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. +func ExpectNoError(err error, explain ...interface{}) { + ExpectNoErrorWithOffset(1, err, explain...) +} + +// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller +// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f"). +func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { + gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) +} + +// ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter. +func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...) +} + +// ExpectHaveKey expects the actual map has the key in the keyset +func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...) +} + +// ExpectEmpty expects actual is empty +func ExpectEmpty(actual interface{}, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...) 
+} + +// ExpectMatchRegexp expects the string to match the provided regular expression +func ExpectMatchRegexp(actual string, regexp string, explain ...interface{}) { + gomega.ExpectWithOffset(1, actual).To(gomega.MatchRegexp(regexp), explain...) +} + +// ExpectMatchRegexp expects the string to match the provided regular expression +func Failf(format string, args ...interface{}) { + ginkgo.Fail(fmt.Sprintf(format, args...)) +} diff --git a/vendor/github.com/loft-sh/vcluster/test/framework/helpers.go b/vendor/github.com/loft-sh/vcluster/test/framework/helpers.go new file mode 100644 index 00000000..75be3496 --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/test/framework/helpers.go @@ -0,0 +1,25 @@ +package framework + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// IsDefaultStorageClassAnnotation represents a StorageClass annotation that +// marks a class as the default StorageClass +const IsDefaultStorageClassAnnotation = "storageclass.kubernetes.io/is-default-class" + +// BetaIsDefaultStorageClassAnnotation is the beta version of IsDefaultStorageClassAnnotation. +// TODO: remove Beta when no longer used +const BetaIsDefaultStorageClassAnnotation = "storageclass.beta.kubernetes.io/is-default-class" + +// IsDefaultAnnotation returns a boolean if +// the annotation is set +// TODO: remove Beta when no longer needed +func IsDefaultAnnotation(obj metav1.ObjectMeta) bool { + if obj.Annotations[IsDefaultStorageClassAnnotation] == "true" { + return true + } + if obj.Annotations[BetaIsDefaultStorageClassAnnotation] == "true" { + return true + } + + return false +} diff --git a/vendor/github.com/loft-sh/vcluster/test/framework/kubectlcmd.go b/vendor/github.com/loft-sh/vcluster/test/framework/kubectlcmd.go new file mode 100644 index 00000000..e2b12969 --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/test/framework/kubectlcmd.go @@ -0,0 +1,213 @@ +package framework + +import ( + "bytes" + "fmt" + "io" + "net" + "net/url" + "os/exec" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/loft-sh/log" + "github.com/samber/lo" + "k8s.io/client-go/tools/clientcmd" + uexec "k8s.io/utils/exec" +) + +// Adopted from k8s test suite - https://github.com/kubernetes/kubernetes/blob/f2576efecdf2d902b12a3fedae7995311d4febfa/test/e2e/framework/kubectl/kubectl_utils.go#L43-L100 +// TestKubeconfig is a struct containing the needed attributes from TestContext and Framework(Namespace). +type TestKubeconfig struct { + CertDir string + Host string + KubeConfig string + KubeContext string + KubectlPath string + Namespace string // Every test has at least one namespace unless creation is skipped +} + +// NewTestKubeconfig returns a new Kubeconfig struct instance. +func NewTestKubeconfig(kubeconfig, namespace string) *TestKubeconfig { + return &TestKubeconfig{ + KubeConfig: kubeconfig, + Namespace: namespace, + KubectlPath: "kubectl", + } +} + +// KubectlCmd runs the kubectl executable through the wrapper script. +func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd { + defaultArgs := []string{} + + // Reference a --server option so tests can run anywhere. 
+ if tk.Host != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+tk.Host) + } + if tk.KubeConfig != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+tk.KubeConfig) + + // Reference the KubeContext + if tk.KubeContext != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+tk.KubeContext) + } + } else { + if tk.CertDir != "" { + defaultArgs = append(defaultArgs, + fmt.Sprintf("--certificate-authority=%s", filepath.Join(tk.CertDir, "ca.crt")), + fmt.Sprintf("--client-certificate=%s", filepath.Join(tk.CertDir, "kubecfg.crt")), + fmt.Sprintf("--client-key=%s", filepath.Join(tk.CertDir, "kubecfg.key"))) + } + } + if tk.Namespace != "" { + defaultArgs = append(defaultArgs, fmt.Sprintf("--namespace=%s", tk.Namespace)) + } + kubectlArgs := append(defaultArgs, args...) + + // We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh" + //and so on. + cmd := exec.Command(tk.KubectlPath, kubectlArgs...) + + // caller will invoke this and wait on it. + return cmd +} + +// Adopted from k8s test suite - https://github.com/kubernetes/kubernetes/blob/f2576efecdf2d902b12a3fedae7995311d4febfa/test/e2e/framework/util.go#L552-L687 +// KubectlBuilder is used to build, customize and execute a kubectl Command. +// Add more functions to customize the builder as needed. +type KubectlBuilder struct { + cmd *exec.Cmd + timeout <-chan time.Time +} + +// NewKubectlCommand returns a KubectlBuilder for running kubectl. +func NewKubectlCommand(kubeconfigPath, namespace string, args ...string) *KubectlBuilder { + b := new(KubectlBuilder) + tk := NewTestKubeconfig(kubeconfigPath, namespace) + b.cmd = tk.KubectlCmd(args...) + return b +} + +// WithEnv sets the given environment and returns itself. +func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder { + b.cmd.Env = env + return b +} + +// WithTimeout sets the given timeout and returns itself. +func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder { + b.timeout = t + return b +} + +// WithStdinData sets the given data to stdin and returns itself. +func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder { + b.cmd.Stdin = strings.NewReader(data) + return &b +} + +// WithStdinReader sets the given reader and returns itself. +func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder { + b.cmd.Stdin = reader + return &b +} + +// ExecOrDie runs the kubectl executable or dies if error occurs. +func (b KubectlBuilder) ExecOrDie(namespace string) string { + str, err := b.Exec() + // In case of i/o timeout error, try talking to the apiserver again after 2s before dying. + // Note that we're still dying after retrying so that we can get visibility to triage it further. + if isTimeout(err) { + log.GetInstance().Infof("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.") + time.Sleep(2 * time.Second) + retryStr, retryErr := RunKubectl(namespace, "version") + log.GetInstance().Infof("stdout: %q", retryStr) + log.GetInstance().Infof("err: %v", retryErr) + } + ExpectNoError(err) + return str +} + +func isTimeout(err error) bool { + if _, ok := lo.ErrorsAs[*url.Error](err); ok { + if err, ok := lo.ErrorsAs[net.Error](err); ok && err.Timeout() { + return true + } + } + + if err, ok := lo.ErrorsAs[net.Error](err); ok && err.Timeout() { + return true + } + + return false +} + +// Exec runs the kubectl executable. 
+func (b KubectlBuilder) Exec() (string, error) { + stdout, _, err := b.ExecWithFullOutput() + return stdout, err +} + +// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr. +func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) { + var stdout, stderr bytes.Buffer + cmd := b.cmd + cmd.Stdout, cmd.Stderr = &stdout, &stderr + + log.GetInstance().Infof("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately + if err := cmd.Start(); err != nil { + return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%w", cmd, cmd.Stdout, cmd.Stderr, err) + } + errCh := make(chan error, 1) + go func() { + errCh <- cmd.Wait() + }() + select { + case err := <-errCh: + if err != nil { + var rc = 127 + if ee, ok := lo.ErrorsAs[*exec.ExitError](err); ok { + rc = ee.Sys().(syscall.WaitStatus).ExitStatus() + log.GetInstance().Infof("rc: %d", rc) + } + return stdout.String(), stderr.String(), uexec.CodeExitError{ + Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%w", cmd, cmd.Stdout, cmd.Stderr, err), + Code: rc, + } + } + case <-b.timeout: + _ = b.cmd.Process.Kill() + return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr) + } + log.GetInstance().Infof("stderr: %q", stderr.String()) + log.GetInstance().Infof("stdout: %q", stdout.String()) + return stdout.String(), stderr.String(), nil +} + +// RunKubectlOrDie is a convenience wrapper over kubectlBuilder +func RunKubectlOrDie(kubeconfigPath, namespace string, args ...string) string { + return NewKubectlCommand(kubeconfigPath, namespace, args...).ExecOrDie(namespace) +} + +// RunKubectl is a convenience wrapper over kubectlBuilder +func RunKubectl(kubeconfigPath, namespace string, args ...string) (string, error) { + return NewKubectlCommand(kubeconfigPath, namespace, args...).Exec() +} + +// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder +// It will also return the command's stderr. 
+func RunKubectlWithFullOutput(kubeconfigPath, namespace string, args ...string) (string, string, error) { + return NewKubectlCommand(kubeconfigPath, namespace, args...).ExecWithFullOutput() +} + +// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin +func RunKubectlOrDieInput(kubeconfigPath, namespace string, data string, args ...string) string { + return NewKubectlCommand(kubeconfigPath, namespace, args...).WithStdinData(data).ExecOrDie(namespace) +} + +// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin +func RunKubectlInput(kubeconfigPath, namespace string, data string, args ...string) (string, error) { + return NewKubectlCommand(kubeconfigPath, namespace, args...).WithStdinData(data).Exec() +} diff --git a/vendor/github.com/loft-sh/vcluster/test/framework/util.go b/vendor/github.com/loft-sh/vcluster/test/framework/util.go new file mode 100644 index 00000000..6b027293 --- /dev/null +++ b/vendor/github.com/loft-sh/vcluster/test/framework/util.go @@ -0,0 +1,342 @@ +package framework + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/loft-sh/vcluster/pkg/util/podhelper" + "github.com/loft-sh/vcluster/pkg/util/translate" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/ptr" +) + +func (f *Framework) WaitForPodRunning(podName string, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Second*5, PollTimeout, true, func(ctx context.Context) (bool, error) { + pod, err := f.HostClient.CoreV1().Pods(translate.Default.PhysicalNamespace(ns)).Get(ctx, translate.Default.PhysicalName(podName, ns), metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + vpod, err := f.VclusterClient.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + if vpod.Status.Phase != corev1.PodRunning { + return false, nil + } + return true, nil + }) +} + +func (f *Framework) WaitForPodToComeUpWithReadinessConditions(podName string, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + pod, err := f.HostClient.CoreV1().Pods(translate.Default.PhysicalNamespace(ns)).Get(ctx, translate.Default.PhysicalName(podName, ns), metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + if len(pod.Status.Conditions) < 5 { + return false, nil + } + return true, nil + }) +} + +func (f *Framework) WaitForPodToComeUpWithEphemeralContainers(podName string, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + pod, err := f.HostClient.CoreV1().Pods(translate.Default.PhysicalNamespace(ns)).Get(ctx, translate.Default.PhysicalName(podName, ns), metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + if len(pod.Spec.EphemeralContainers) < 1 { + return false, 
nil + } + + return true, nil + }) +} + +func (f *Framework) WaitForPersistentVolumeClaimBound(pvcName, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + pvc, err := f.HostClient.CoreV1().PersistentVolumeClaims(translate.Default.PhysicalNamespace(ns)).Get(ctx, translate.Default.PhysicalName(pvcName, ns), metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + + return false, err + } + + if pvc.Status.Phase != corev1.ClaimBound { + return false, nil + } + + vpvc, err := f.VclusterClient.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + + return false, err + } + + if vpvc.Status.Phase != corev1.ClaimBound { + return false, nil + } + + return true, nil + }) +} + +func (f *Framework) WaitForInitManifestConfigMapCreation(configMapName, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Millisecond*500, PollTimeout, true, func(ctx context.Context) (bool, error) { + _, err := f.VclusterClient.CoreV1().ConfigMaps(ns).Get(ctx, configMapName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + + return true, nil + }) +} + +func (f *Framework) WaitForServiceAccount(saName string, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + _, err := f.VclusterClient.CoreV1().ServiceAccounts(ns).Get(ctx, saName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil + }) +} + +func (f *Framework) WaitForService(serviceName string, ns string) error { + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + _, err := f.HostClient.CoreV1().Services(translate.Default.PhysicalNamespace(ns)).Get(ctx, translate.Default.PhysicalName(serviceName, ns), metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil + }) +} + +// Some vcluster operations list Service, e.g. pod translation. +// To ensure expected results of such operation we need to wait until newly created Service is in syncer controller cache, +// otherwise syncer will operate on slightly outdated resources, which is not good for test stability. +// This function ensures that Service is actually in controller cache by making an update and checking for it in physical service. 
+func (f *Framework) WaitForServiceInSyncerCache(serviceName string, ns string) error { + annotationKey := "e2e-test-bump" + updated := false + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + vService, err := f.VclusterClient.CoreV1().Services(ns).Get(ctx, serviceName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + + if !updated { + if vService.Annotations == nil { + vService.Annotations = map[string]string{} + } + vService.Annotations[annotationKey] = "arbitrary" + _, err = f.VclusterClient.CoreV1().Services(ns).Update(ctx, vService, metav1.UpdateOptions{}) + if err != nil { + if kerrors.IsConflict(err) || kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + updated = true + } + + // Check for annotation + pService, err := f.HostClient.CoreV1().Services(translate.Default.PhysicalNamespace(ns)).Get(ctx, translate.Default.PhysicalName(serviceName, ns), metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + return false, err + } + _, ok := pService.Annotations[annotationKey] + return ok, nil + }) +} + +func (f *Framework) DeleteTestNamespace(ns string, waitUntilDeleted bool) error { + err := f.VclusterClient.CoreV1().Namespaces().Delete(f.Context, ns, metav1.DeleteOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return nil + } + return err + } + if !waitUntilDeleted { + return nil + } + return wait.PollUntilContextTimeout(f.Context, time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + _, err = f.VclusterClient.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) + if kerrors.IsNotFound(err) { + return true, nil + } + return false, err + }) +} + +func (f *Framework) GetDefaultSecurityContext() *corev1.SecurityContext { + return &corev1.SecurityContext{ + RunAsUser: ptr.To(int64(12345)), + } +} + +func (f *Framework) CreateCurlPod(ns string) (*corev1.Pod, error) { + return f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "curl"}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "curl", + Image: "curlimages/curl", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: f.GetDefaultSecurityContext(), + Command: []string{"sleep"}, + Args: []string{"9999"}, + }, + }, + }, + }, metav1.CreateOptions{}) +} + +func (f *Framework) CreateNginxPodAndService(ns string) (*corev1.Pod, *corev1.Service, error) { + podName := "nginx" + serviceName := "nginx" + labels := map[string]string{"app": "nginx"} + + pod, err := f.VclusterClient.CoreV1().Pods(ns).Create(f.Context, &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: podName, + Image: "nginxinc/nginx-unprivileged", + ImagePullPolicy: corev1.PullIfNotPresent, + SecurityContext: f.GetDefaultSecurityContext(), + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, nil, err + } + + service, err := f.VclusterClient.CoreV1().Services(ns).Create(f.Context, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: ns, + }, + Spec: corev1.ServiceSpec{ + Selector: labels, + Ports: []corev1.ServicePort{ + {Port: 8080}, + }, + }, + }, metav1.CreateOptions{}) + + return pod, service, err +} + +func (f *Framework) TestServiceIsEventuallyReachable(curlPod *corev1.Pod, service *corev1.Service) { 
+ var stdoutBuffer []byte + var lastError error + err := wait.PollUntilContextTimeout(f.Context, 10*time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + stdoutBuffer, _, lastError = f.curlService(ctx, curlPod, service) + if lastError == nil && string(stdoutBuffer) == "200" { + return true, nil + } + return false, nil + }) + ExpectNoError(err, "Nginx service is expected to be reachable. On the last attempt got %s http code and following error:", string(stdoutBuffer), lastError) +} + +func (f *Framework) TestServiceIsEventuallyUnreachable(curlPod *corev1.Pod, service *corev1.Service) { + var stdoutBuffer, stderrBuffer []byte + var lastError error + err := wait.PollUntilContextTimeout(f.Context, 10*time.Second, PollTimeout, true, func(ctx context.Context) (bool, error) { + stdoutBuffer, stderrBuffer, lastError = f.curlService(ctx, curlPod, service) + if lastError != nil && strings.Contains(string(stderrBuffer), "timed out") && string(stdoutBuffer) == "000" { + return true, nil + } + return false, nil + }) + ExpectNoError(err, "Nginx service is expected to be unreachable. On the last attempt got %s http code and following error:", string(stdoutBuffer), lastError) +} + +func (f *Framework) curlService(_ context.Context, curlPod *corev1.Pod, service *corev1.Service) ([]byte, []byte, error) { + url := fmt.Sprintf("http://%s.%s.svc:%d/", service.GetName(), service.GetNamespace(), service.Spec.Ports[0].Port) + cmd := []string{"curl", "-s", "--show-error", "-o", "/dev/null", "-w", "%{http_code}", "--max-time", "2", url} + return podhelper.ExecBuffered(f.Context, f.VclusterConfig, curlPod.GetNamespace(), curlPod.GetName(), curlPod.Spec.Containers[0].Name, cmd, nil) +} + +func (f *Framework) CreateEgressNetworkPolicyForDNS(ctx context.Context, ns string) (*networkingv1.NetworkPolicy, error) { + UDPProtocol := corev1.ProtocolUDP + return f.VclusterClient.NetworkingV1().NetworkPolicies(ns).Create(ctx, &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "allow-coredns-egress"}, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 1053}, + Protocol: &UDPProtocol, + }, + }, + To: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"k8s-app": "kube-dns"}}, + NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": "kube-system"}}, + }, + }, + }, + }, + }, + }, metav1.CreateOptions{}) +}
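
The vendored framework above is driven entirely by environment variables (`VCLUSTER_NAME`, `VCLUSTER_NAMESPACE`, `VCLUSTER_SUFFIX`, `VCLUSTER_CLIENT_TIMEOUT`, `MULTINAMESPACE_MODE`). A sketch of how a plugin's e2e suite might bootstrap it with ginkgo v2, assuming the standard client-go scheme; the suite name and file layout are illustrative only:

```go
package e2e

import (
	"context"
	"testing"

	"github.com/loft-sh/vcluster/test/framework"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"k8s.io/client-go/kubernetes/scheme"
)

// TestRunE2ETests is the go test entrypoint that hands control to ginkgo.
func TestRunE2ETests(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)

	// CreateFramework connects to the vcluster (via a local port-forward),
	// waits until its API server answers, and fills framework.DefaultFramework.
	err := framework.CreateFramework(context.Background(), scheme.Scheme)
	if err != nil {
		t.Fatalf("could not create the test framework: %v", err)
	}

	ginkgo.RunSpecs(t, "vcluster plugin e2e suite")
}

// AfterSuite removes the temporary kubeconfig written by CreateFramework.
var _ = ginkgo.AfterSuite(func() {
	framework.ExpectNoError(framework.DefaultFramework.Cleanup())
})
```

After bootstrap, individual specs can use `framework.DefaultFramework` directly, e.g. `f.WaitForPodRunning("curl", "default")` followed by `f.TestServiceIsEventuallyReachable(pod, svc)` as defined in the utilities above.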