Refactor E2E test suite + add RSA tests (#60)
* refactor: own struct for keys

The tests have been refactored to use a dedicated struct for the private
and public keys, which holds both the key itself and the path to it (see
the sketch below).

This allows a bigger refactoring of the E2E tests, so that each test case
can be run independently of the type of key used for signing & validation.
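
A minimal sketch of the idea (names are illustrative; the actual struct in the repo may differ):

```go
package framework

import "crypto"

// keyPair bundles a generated key with the file it was written to, so
// tests can derive the path from the value instead of hardcoding it.
type keyPair struct {
	key  crypto.PrivateKey // generated private key (RSA or ECDSA)
	path string            // filesystem location of the PEM-encoded key
}
```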

Signed-off-by: Bruno Bressi <[email protected]>

* refactor: use private key variable

Instead of hardcoding the path in all tests, the value is now derived from the
previously unused private-key variable that is returned. This way, the tests can
be refactored to run by only passing the key-creation function, as sketched below.
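
Continuing the keyPair sketch above, the intended call shape might look like this (both names and the signing helper are hypothetical):

```go
package framework

// keyGenFunc is the only thing a test case needs to be handed: it
// produces a fresh key pair, and the signing path is derived from the
// returned value rather than from a hardcoded constant.
type keyGenFunc func() (keyPair, error)

// signTestImage shows the intended flow: generate a key, then sign
// using the derived path. signWithCosign is a hypothetical helper
// passed in here only to keep the sketch self-contained.
func signTestImage(gen keyGenFunc, image string, signWithCosign func(keyPath, img string) error) error {
	kp, err := gen()
	if err != nil {
		return err
	}
	return signWithCosign(kp.path, image)
}
```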

Signed-off-by: Bruno Bressi <[email protected]>

* refactor: [WIP] framework wraps testing.T

The framework struct has been refactored to abstract the Go testing framework.
This allows the E2E test cases to be written without creating a new framework for each test.
The framework functions no longer need to do much micromanagement and cleanup; they simply
check whether an error has occurred and return. New functions can therefore be written
without having to decide whether to fail the test.

The cleanup function takes care of the final step: cleaning everything up and then deciding
whether the test failed or passed.

Additionally, a new type is introduced to wrap the test cases so that they can be
run with t.Run; a condensed sketch follows.
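
Condensed to its essentials, the pattern looks roughly like this (the `step` helper is illustrative; the real methods inline the check, as the client.go diff below shows):

```go
package framework

import "testing"

// Framework wraps *testing.T and records the first error any helper
// hits; once set, later helpers become no-ops.
type Framework struct {
	t   *testing.T
	err error
}

// step is the shape every helper follows: bail out if a previous step
// failed, otherwise run and record our own error.
func (f *Framework) step(do func() error) {
	if f.err != nil {
		return
	}
	f.err = do()
}

// Cleanup decides pass/fail exactly once, at the end of the test.
func (f *Framework) Cleanup() {
	if f.err != nil {
		f.t.Fatal(f.err)
	}
}
```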

* refactor: use new testing schema

The test cases are now refactored to accept a signing function, so that
the same test can be run with either RSA or ECDSA keys without
duplicating code.

The new function type is used for the signing function, and each test case
must now return the set of actions required for the use case under test,
wrapped in a func that takes *testing.T so it can be run by the t.Run
method. A sketch of the schema follows.
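
A sketch of the schema (type and image names are illustrative):

```go
package test

import "testing"

// signFunc abstracts key generation plus signing, so one test body
// serves both RSA and ECDSA keys.
type signFunc func(image string) error

// testCase returns the actions for one use case, wrapped in the
// func(*testing.T) that t.Run expects.
type testCase func(sign signFunc) func(t *testing.T)

// Example shape of a case: sign an image, then assert on the outcome.
func signedDeploymentPasses(sign signFunc) func(t *testing.T) {
	return func(t *testing.T) {
		if err := sign("registry.local/test:latest"); err != nil { // image name illustrative
			t.Fatalf("signing failed: %v", err)
		}
		// ...create the deployment and assert the verification event...
	}
}
```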

* chore: added E2E variable

Added a variable so that the additional E2E test is also executed. This
test must be refactored or removed in a future commit, as it depends on an
image already being present on the machine running the test; see the sketch below.
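
The Makefile change below exports COSIGN_E2E before running the suite; the gated test presumably checks it along these lines (test name illustrative):

```go
package test

import (
	"os"
	"testing"
)

// The extra E2E test only runs when the suite is invoked via the
// Makefile target, which exports COSIGN_E2E.
func TestWithPreBuiltImage(t *testing.T) {
	if os.Getenv("COSIGN_E2E") == "" {
		t.Skip("COSIGN_E2E not set; this test needs an image already present on the machine")
	}
	// ...test body that relies on the locally present image...
}
```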

* test: added RSA test cases

Each test case for ECDSA keys is now also run for RSA keys, as sketched below.

The tests were also sped up by reducing the delay between checks
from 5s to 500ms.
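
With the schema above, fanning every case out over both key types could look like this (the signer constructors are assumptions, declared here only to keep the sketch compilable):

```go
package test

import "testing"

// Assumed signer implementations for the two key types; the real suite
// wires these to cosign with freshly generated keys.
var signWithECDSA, signWithRSA signFunc

func TestE2E(t *testing.T) {
	signers := map[string]signFunc{
		"ecdsa": signWithECDSA,
		"rsa":   signWithRSA,
	}
	cases := map[string]testCase{
		"signed deployment passes": signedDeploymentPasses,
	}
	// Every case runs once per key type under a composed subtest name.
	for keyType, sign := range signers {
		for name, tc := range cases {
			t.Run(keyType+"/"+name, tc(sign))
		}
	}
}
```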

Signed-off-by: Bruno Bressi <[email protected]>

---------

Signed-off-by: Bruno Bressi <[email protected]>
puffitos authored Sep 20, 2024
1 parent abe5e41 commit 6b3f0ca
Showing 6 changed files with 399 additions and 469 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -7,7 +7,7 @@ PORT := 5000
 .PHONY: test-e2e
 test-e2e:
 	@echo "Running e2e tests..."
-	@go test -v -race -count 1 ./test/
+	@export COSIGN_E2E="42" && go test -v -race -count 1 ./test/

 .PHONY: test-unit
 test-unit:
196 changes: 115 additions & 81 deletions test/framework/client.go
@@ -18,16 +18,24 @@ import (
 // the cosignwebhook in a k8s cluster
 type Framework struct {
 	k8s *kubernetes.Clientset
+	t   *testing.T
+	err error
 }

-func New() (*Framework, error) {
+// New creates a new Framework
+func New(t *testing.T) (*Framework, error) {
+	if t == nil {
+		return nil, fmt.Errorf("test object must not be nil")
+	}
+
 	k8s, err := createClientSet()
 	if err != nil {
 		return nil, err
 	}

+	return &Framework{
 		k8s: k8s,
+		t:   t,
 	}, nil
 }

@@ -37,7 +45,6 @@ func createClientSet() (k8sClient *kubernetes.Clientset, err error) {
 		kubeconfig = os.Getenv("HOME") + "/.kube/config"
 	}

-	// create restconfig from kubeconfig
 	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
 	if err != nil {
 		return nil, err
@@ -52,156 +59,179 @@ func createClientSet() (k8sClient *kubernetes.Clientset, err error) {

 // Cleanup removes all resources created by the framework
 // and cleans up the testing directory.
-func (f *Framework) Cleanup(t testing.TB) {
-	cleanupKeys(t)
-	f.cleanupDeployments(t)
-	f.cleanupSecrets(t)
+func (f *Framework) Cleanup() {
+	f.cleanupKeys()
+	f.cleanupDeployments()
+	f.cleanupSecrets()
+	if f.err != nil {
+		f.t.Fatal(f.err)
+	}
 }

 // cleanupDeployments removes all deployments from the testing namespace
 // if they exist
-func (f *Framework) cleanupDeployments(t testing.TB) {
+func (f *Framework) cleanupDeployments() {
 	if f.k8s == nil {
-		t.Logf("k8s client is nil")
 		return
 	}

-	t.Logf("cleaning up deployments")
+	f.t.Logf("cleaning up deployments")
 	deployments, err := f.k8s.AppsV1().Deployments("test-cases").List(context.Background(), metav1.ListOptions{})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}
 	for _, d := range deployments.Items {
 		err = f.k8s.AppsV1().Deployments("test-cases").Delete(context.Background(), d.Name, metav1.DeleteOptions{})
 		if err != nil {
-			f.Cleanup(t)
-			t.Fatal(err)
+			f.err = err
+			return
 		}
 	}

 	timeout := time.After(30 * time.Second)
 	for {
 		select {
 		case <-timeout:
-			f.Cleanup(t)
+			f.err = fmt.Errorf("timeout reached while waiting for deployments to be deleted")
 		default:
 			pods, err := f.k8s.CoreV1().Pods("test-cases").List(context.Background(), metav1.ListOptions{})
 			if err != nil {
-				f.Cleanup(t)
-				t.Fatal(err)
+				f.err = err
+				return
 			}

 			if len(pods.Items) == 0 {
-				t.Logf("All pods are deleted")
+				f.t.Logf("All pods are deleted")
 				return
 			}
-			time.Sleep(5 * time.Second)
+			time.Sleep(500 * time.Millisecond)
 		}
 	}
 }

 // cleanupSecrets removes all secrets from the testing namespace
-func (f *Framework) cleanupSecrets(t testing.TB) {
+func (f *Framework) cleanupSecrets() {
 	if f.k8s == nil {
-		t.Logf("k8s client is nil")
 		return
 	}

-	t.Logf("cleaning up secrets")
+	f.t.Logf("cleaning up secrets")
 	secrets, err := f.k8s.CoreV1().Secrets("test-cases").List(context.Background(), metav1.ListOptions{})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}
+	if len(secrets.Items) == 0 {
+		f.t.Log("no secrets to delete")
+		return
+	}
 	for _, s := range secrets.Items {
 		err = f.k8s.CoreV1().Secrets("test-cases").Delete(context.Background(), s.Name, metav1.DeleteOptions{})
 		if err != nil {
-			f.Cleanup(t)
-			t.Fatal(err)
+			f.err = err
+			return
 		}
 	}
+	f.t.Log("all secrets are deleted")
 }

 // GetPods returns the pod(s) of the deployment. The fetch is done by label selector (app=<deployment name>)
 // If the get request fails, the test will fail and the framework will be cleaned up
-func (f *Framework) GetPods(t *testing.T, d appsv1.Deployment) *corev1.PodList {
-	pods, err := f.k8s.CoreV1().Pods("test-cases").List(context.Background(), metav1.ListOptions{
+func (f *Framework) GetPods(d appsv1.Deployment) *corev1.PodList {
+	if f.err != nil {
+		return nil
+	}
+
+	pods, err := f.k8s.CoreV1().Pods(d.Namespace).List(context.Background(), metav1.ListOptions{
 		LabelSelector: fmt.Sprintf("app=%s", d.Name),
 	})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
 	}
 	return pods
 }

 // CreateDeployment creates a deployment in the testing namespace
-func (f *Framework) CreateDeployment(t testing.TB, d appsv1.Deployment) {
-	_, err := f.k8s.AppsV1().Deployments("test-cases").Create(context.Background(), &d, metav1.CreateOptions{})
+func (f *Framework) CreateDeployment(d appsv1.Deployment) {
+	if f.err != nil {
+		return
+	}
+
+	f.t.Logf("creating deployment %s", d.Name)
+	_, err := f.k8s.AppsV1().Deployments(d.Namespace).Create(context.Background(), &d, metav1.CreateOptions{})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}
+	f.t.Logf("deployment %s created", d.Name)
 }

 // CreateSecret creates a secret in the testing namespace
-func (f *Framework) CreateSecret(t *testing.T, secret corev1.Secret) {
-	t.Logf("creating secret %s", secret.Name)
-	s, err := f.k8s.CoreV1().Secrets("test-cases").Create(context.Background(), &secret, metav1.CreateOptions{})
+func (f *Framework) CreateSecret(s corev1.Secret) {
+	if f.err != nil {
+		return
+	}
+
+	f.t.Logf("creating secret %s", s.Name)
+	_, err := f.k8s.CoreV1().Secrets(s.Namespace).Create(context.Background(), &s, metav1.CreateOptions{})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}
-	t.Logf("created secret %s", s.Name)
+	f.t.Logf("secret %s created", s.Name)
 }

 // WaitForDeployment waits until the deployment is ready
-func (f *Framework) WaitForDeployment(t *testing.T, d appsv1.Deployment) {
-	t.Logf("waiting for deployment %s to be ready", d.Name)
+func (f *Framework) WaitForDeployment(d appsv1.Deployment) {
+	if f.err != nil {
+		return
+	}
+
+	f.t.Logf("waiting for deployment %s to be ready", d.Name)
 	// wait until the deployment is ready
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 	defer cancel()
 	w, err := f.k8s.AppsV1().Deployments(d.Namespace).Watch(ctx, metav1.ListOptions{
 		FieldSelector: fmt.Sprintf("metadata.name=%s", d.Name),
 	})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}

 	for {
 		select {
 		case <-ctx.Done():
-			f.Cleanup(t)
-			t.Fatal("timeout reached while waiting for deployment to be ready")
+			f.err = fmt.Errorf("timeout reached while waiting for deployment to be ready")
 		case event := <-w.ResultChan():
 			deployment, ok := event.Object.(*appsv1.Deployment)
 			if !ok {
-				time.Sleep(5 * time.Second)
+				time.Sleep(500 * time.Millisecond)
 				continue
 			}

 			if deployment.Status.ReadyReplicas == 1 {
-				t.Logf("deployment %s is ready", d.Name)
+				f.t.Logf("deployment %s is ready", d.Name)
 				return
 			}
-			time.Sleep(5 * time.Second)
+			time.Sleep(500 * time.Millisecond)
 		}
 	}
 }

 // waitForReplicaSetCreation waits for the replicaset of the given deployment to be created
-func (f *Framework) waitForReplicaSetCreation(t *testing.T, d appsv1.Deployment) (string, error) {
-	rs, err := f.k8s.AppsV1().ReplicaSets("test-cases").Watch(context.Background(), metav1.ListOptions{
+func (f *Framework) waitForReplicaSetCreation(d appsv1.Deployment) string {
+	if f.err != nil {
+		return ""
+	}
+
+	rs, err := f.k8s.AppsV1().ReplicaSets(d.Namespace).Watch(context.Background(), metav1.ListOptions{
 		LabelSelector: fmt.Sprintf("app=%s", d.Name),
 	})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return ""
 	}

 	ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
@@ -210,37 +240,39 @@ func (f *Framework) waitForReplicaSetCreation(t *testing.T, d appsv1.Deployment)
 	for {
 		select {
 		case <-ctx.Done():
-			f.Cleanup(t)
-			t.Fatal("timeout reached while waiting for replicaset to be created")
+			f.err = fmt.Errorf("timeout reached while waiting for replicaset to be created")
 		case event := <-rs.ResultChan():
 			rs, ok := event.Object.(*appsv1.ReplicaSet)
 			if ok {
-				t.Logf("replicaset %s created", rs.Name)
-				return rs.Name, nil
+				f.t.Logf("replicaset %s created", rs.Name)
+				return rs.Name
 			}
-			time.Sleep(5 * time.Second)
+			time.Sleep(500 * time.Millisecond)
 		}
 	}
 }

 // AssertDeploymentFailed asserts that the deployment cannot start
-func (f *Framework) AssertDeploymentFailed(t *testing.T, d appsv1.Deployment) {
-	t.Logf("waiting for deployment %s to fail", d.Name)
+func (f *Framework) AssertDeploymentFailed(d appsv1.Deployment) {
+	if f.err != nil {
+		return
+	}
+
+	f.t.Logf("waiting for deployment %s to fail", d.Name)

 	// watch for replicasets of the deployment
-	rsName, err := f.waitForReplicaSetCreation(t, d)
-	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+	rsName := f.waitForReplicaSetCreation(d)
+	if rsName == "" {
+		return
 	}

 	// get warning events of deployment's namespace and check if the deployment failed
-	w, err := f.k8s.CoreV1().Events("test-cases").Watch(context.Background(), metav1.ListOptions{
+	w, err := f.k8s.CoreV1().Events(d.Namespace).Watch(context.Background(), metav1.ListOptions{
 		FieldSelector: fmt.Sprintf("involvedObject.name=%s", rsName),
 	})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}

 	ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
@@ -249,34 +281,37 @@ func (f *Framework) AssertDeploymentFailed(t *testing.T, d appsv1.Deployment) {
 	for {
 		select {
 		case <-ctx.Done():
-			f.Cleanup(t)
-			t.Fatal("timeout reached while waiting for deployment to fail")
+			f.err = fmt.Errorf("timeout reached while waiting for deployment to fail")
 		case event := <-w.ResultChan():
 			e, ok := event.Object.(*corev1.Event)
 			if !ok {
-				time.Sleep(5 * time.Second)
+				time.Sleep(500 * time.Millisecond)
 				continue
 			}
 			if e.Reason == "FailedCreate" {
-				t.Logf("deployment %s failed: %s", d.Name, e.Message)
+				f.t.Logf("deployment %s failed: %s", d.Name, e.Message)
 				return
 			}
-			time.Sleep(5 * time.Second)
+			time.Sleep(500 * time.Millisecond)
 		}
 	}
 }

 // AssertEventForPod asserts that a PodVerified event is created
-func (f *Framework) AssertEventForPod(t *testing.T, reason string, p corev1.Pod) {
-	t.Logf("waiting for %s event to be created for pod %s", reason, p.Name)
+func (f *Framework) AssertEventForPod(reason string, p corev1.Pod) {
+	if f.err != nil {
+		return
+	}
+
+	f.t.Logf("waiting for %s event to be created for pod %s", reason, p.Name)

 	// watch for events of deployment's namespace and check if the podverified event is created
-	w, err := f.k8s.CoreV1().Events("test-cases").Watch(context.Background(), metav1.ListOptions{
+	w, err := f.k8s.CoreV1().Events(p.Namespace).Watch(context.Background(), metav1.ListOptions{
 		FieldSelector: fmt.Sprintf("involvedObject.name=%s", p.Name),
 	})
 	if err != nil {
-		f.Cleanup(t)
-		t.Fatal(err)
+		f.err = err
+		return
 	}

 	ctx, done := context.WithTimeout(context.Background(), 30*time.Second)
@@ -285,19 +320,18 @@ func (f *Framework) AssertEventForPod(t *testing.T, reason string, p corev1.Pod)
 	for {
 		select {
 		case <-ctx.Done():
-			f.Cleanup(t)
-			t.Fatal("timeout reached while waiting for podverified event")
+			f.err = fmt.Errorf("timeout reached while waiting for event to be created")
 		case event := <-w.ResultChan():
 			e, ok := event.Object.(*corev1.Event)
 			if !ok {
-				time.Sleep(5 * time.Second)
+				time.Sleep(500 * time.Millisecond)
 				continue
 			}
 			if e.Reason == reason {
-				t.Logf("%s event created for pod %s", reason, p.Name)
+				f.t.Logf("%s event created for pod %s", reason, p.Name)
 				return
 			}
-			time.Sleep(5 * time.Second)
+			time.Sleep(500 * time.Millisecond)
 		}
 	}
 }
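
For context, a test consuming the refactored client might read roughly like this (the framework import path and the deployment/secret construction are elided; the test name is illustrative):

```go
package test

import (
	"testing"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// Sketch of the consuming side: helpers no longer take t, errors
// accumulate inside the framework, and Cleanup fails the test once,
// at the end, if any step recorded an error.
func TestSignedImageIsVerified(t *testing.T) {
	fw, err := framework.New(t)
	if err != nil {
		t.Fatal(err)
	}
	defer fw.Cleanup()

	var secret corev1.Secret   // public-key secret, construction elided
	var depl appsv1.Deployment // signed workload, construction elided

	fw.CreateSecret(secret)
	fw.CreateDeployment(depl)
	fw.WaitForDeployment(depl)
	if pods := fw.GetPods(depl); pods != nil {
		for _, p := range pods.Items {
			fw.AssertEventForPod("PodVerified", p)
		}
	}
}
```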