From 704a0fff47953a72fbafc31c1ddfa19239d5b699 Mon Sep 17 00:00:00 2001
From: fernandoataoldotcom
Date: Sat, 14 Oct 2023 13:51:51 +0000
Subject: [PATCH] feat(tests): more groups

---
 .../workflows/aws-cloud-regression-suite.yml | 17 ++++++++++++-----
 tests/k8s-test.sh                            | 18 ++++++++++++------
 tests/run.sh                                 |  6 ++++++
 3 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/.github/workflows/aws-cloud-regression-suite.yml b/.github/workflows/aws-cloud-regression-suite.yml
index ce71f64..8505d69 100644
--- a/.github/workflows/aws-cloud-regression-suite.yml
+++ b/.github/workflows/aws-cloud-regression-suite.yml
@@ -19,15 +19,22 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
 
-      - name: Running AWS Regression Suite
+      - name: Setup Codespace Container
         env:
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
         run: |
-          docker run -v $(pwd):/app --workdir /app/tests --rm -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 ghcr.io/glueops/codespaces:v0.31.1 ./run.sh
+          echo "::group::Setup Codespace Container"
+          docker run -d -v $(pwd):/app --workdir /app/tests --rm -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 --name codespaces ghcr.io/glueops/codespaces:v0.31.1 sleep infinity
+          echo "::endgroup::"
+
+      - name: Running AWS Regression Suite
+        run: |
+          docker exec codespaces ./run.sh
 
       - name: Run AWS Destroy Only (in case previous step failed)
-        env:
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
         run: |
-          docker run -v $(pwd):/app --workdir /app/tests --rm -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 ghcr.io/glueops/codespaces:v0.31.1 ./destroy-aws.sh
+          docker exec codespaces ./destroy-aws.sh
         if: always()
+
+      - name: Delete Codespaces Container
+        run: docker rm -f codespaces
diff --git a/tests/k8s-test.sh b/tests/k8s-test.sh
index 161e520..085a16b 100755
--- a/tests/k8s-test.sh
+++ b/tests/k8s-test.sh
@@ -3,11 +3,12 @@
 set -e
 
 # Step 1: Verify storage driver installation (Amazon EBS CSI Driver)
-echo "Checking if the storage driver is installed..."
+echo "::group::Checking if the storage driver is installed..."
 kubectl get pods -n kube-system | grep "ebs-csi-"
+echo "::endgroup::"
 
 # Step 2: Create a StorageClass
-echo "Creating StorageClass..."
+echo "::group::Creating StorageClass..."
 cat < /data/test.txt"
 kubectl exec -it $TEST_POD_NAME -- cat /data/test.txt
+echo "::endgroup::"
 
 # Step 6: Clean up (Optional)
-echo "Cleaning up test resources..."
+echo "::group::Cleaning up test resources..."
 kubectl delete deployment test-app
 kubectl delete pvc test-pvc
 kubectl delete storageclass ebs-sc
+echo "::endgroup::"
diff --git a/tests/run.sh b/tests/run.sh
index 8342bc0..0edcb33 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -2,7 +2,9 @@
 
 set -e
 
+echo "::group::Destroy anything left running"
 ./destroy-aws.sh
+echo "::endgroup::"
 
 echo "::group::Deploying Kubernetes"
 echo "Terraform Init"
@@ -12,6 +14,7 @@ terraform plan
 echo "Terraform Apply"
 terraform apply -auto-approve
 echo "::endgroup::"
+
 echo "::group::Configuring Kubernetes"
 echo "Authenticate with Kubernetes"
 aws eks update-kubeconfig --region us-west-2 --name captain-cluster --role-arn arn:aws:iam::761182885829:role/glueops-captain
@@ -22,6 +25,7 @@ helm repo add projectcalico https://docs.tigera.io/calico/charts
 helm repo update
 helm install calico projectcalico/tigera-operator --version v3.26.1 --namespace tigera-operator -f calico.yaml --create-namespace
 echo "::endgroup::"
+
 echo "::group::Deploying new Node Pool"
 echo "Deploy node pool"
 sed -i 's/#//g' main.tf
@@ -30,11 +34,13 @@ echo "Get nodes and pods from kubernetes"
 kubectl get nodes
 kubectl get pods -A -o=wide
 echo "::endgroup::"
+
 echo "::group::Start Test Suite"
 echo ""
 ./k8s-test.sh
 echo "Test Suite Complete"
 echo "::endgroup::"
+
 echo "::group::Tear down Environment"
 echo "Terraform Destroy"
 terraform destroy -auto-approve
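Note: the sketch below is not part of the patch. It is a minimal standalone illustration of the pattern the workflow change relies on: start the codespaces image detached once, docker exec the test scripts into it, and wrap each phase in GitHub Actions ::group::/::endgroup:: log markers so the run output collapses per phase. The image tag, container name, and script names are taken from the diff above; everything else is illustrative only.

#!/bin/bash
set -e

# Start the codespaces image detached; `sleep infinity` keeps it alive so
# later steps can exec into the same container.
docker run -d --rm \
  -v "$(pwd)":/app --workdir /app/tests \
  --name codespaces \
  ghcr.io/glueops/codespaces:v0.31.1 sleep infinity

# Each phase becomes a collapsible group in the GitHub Actions log.
echo "::group::Running AWS Regression Suite"
docker exec codespaces ./run.sh
echo "::endgroup::"

# Remove the container even if the suite failed (the workflow does this in a
# separate step so it always runs).
docker rm -f codespaces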