diff --git a/.github/workflows/aws-cloud-regression-suite.yml b/.github/workflows/aws-cloud-regression-suite.yml
index ce71f64..220d52f 100644
--- a/.github/workflows/aws-cloud-regression-suite.yml
+++ b/.github/workflows/aws-cloud-regression-suite.yml
@@ -19,15 +19,25 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
 
+      - name: Setup Codespace Container
+        run: |
+          echo "::group::Setup Codespace Container"
+          docker run -d -v $(pwd):/app --workdir /app/tests --rm --name codespaces ghcr.io/glueops/codespaces:v0.31.1 sleep infinity
+          echo "::endgroup::"
+
       - name: Running AWS Regression Suite
         env:
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
         run: |
-          docker run -v $(pwd):/app --workdir /app/tests --rm -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 ghcr.io/glueops/codespaces:v0.31.1 ./run.sh
+          docker exec --workdir /app/tests -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 codespaces sh ./run.sh
 
       - name: Run AWS Destroy Only (in case previous step failed)
         env:
           AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
         run: |
-          docker run -v $(pwd):/app --workdir /app/tests --rm -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 ghcr.io/glueops/codespaces:v0.31.1 ./destroy-aws.sh
+          docker exec --workdir /app/tests -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID=AKIA3COQJC7C2PNUKZV4 -e AWS_DEFAULT_REGION=us-west-2 codespaces sh ./destroy-aws.sh
+        if: always()
+
+      - name: Delete Codespaces Container
+        run: docker rm -f codespaces
         if: always()

diff --git a/tests/k8s-test.sh b/tests/k8s-test.sh
index 161e520..085a16b 100755
--- a/tests/k8s-test.sh
+++ b/tests/k8s-test.sh
@@ -3,11 +3,12 @@ set -e
 
 # Step 1: Verify storage driver installation (Amazon EBS CSI Driver)
-echo "Checking if the storage driver is installed..."
+echo "::group::Checking if the storage driver is installed..."
 kubectl get pods -n kube-system | grep "ebs-csi-"
+echo "::endgroup::"
 
 # Step 2: Create a StorageClass
-echo "Creating StorageClass..."
+echo "::group::Creating StorageClass..."
 cat <<EOF
[... truncated in source: the StorageClass manifest and Steps 3-5 (PVC, test Deployment, write to the volume) are missing ...]
 ...> /data/test.txt"
 kubectl exec -it $TEST_POD_NAME -- cat /data/test.txt
+echo "::endgroup::"
 
 # Step 6: Clean up (Optional)
-echo "Cleaning up test resources..."
+echo "::group::Cleaning up test resources..."
 kubectl delete deployment test-app
 kubectl delete pvc test-pvc
 kubectl delete storageclass ebs-sc
+echo "::endgroup::"

diff --git a/tests/run.sh b/tests/run.sh
index 6ee3eb1..06ff4cd 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -2,14 +2,20 @@
 
 set -e
 
+echo "::group::Destroy anything left running"
 ./destroy-aws.sh
+echo "::endgroup::"
 
+echo "::group::Deploying Kubernetes"
 echo "Terraform Init"
 terraform init
 echo "Terraform Plan"
 terraform plan
 echo "Terraform Apply"
 terraform apply -auto-approve
+echo "::endgroup::"
+
+echo "::group::Configuring Kubernetes"
 echo "Authenticate with Kubernetes"
 aws eks update-kubeconfig --region us-west-2 --name captain-cluster --role-arn arn:aws:iam::761182885829:role/glueops-captain
 echo "Delete AWS CNI"
@@ -18,16 +24,24 @@ echo "Install Calico CNI"
 helm repo add projectcalico https://docs.tigera.io/calico/charts
 helm repo update
 helm install calico projectcalico/tigera-operator --version v3.26.1 --namespace tigera-operator -f calico.yaml --create-namespace
+echo "::endgroup::"
+
+echo "::group::Deploying new Node Pool"
 echo "Deploy node pool"
 sed -i 's/#//g' main.tf
 terraform apply -auto-approve
 echo "Get nodes and pods from kubernetes"
 kubectl get nodes
 kubectl get pods -A -o=wide
-echo "Start Test Suite"
+echo "::endgroup::"
+
+echo "==> Start Test Suite"
 ./k8s-test.sh
-echo "Test Suite Complete"
+echo "==> Test Suite Complete"
+
+echo "::group::Tear down Environment"
 echo "Terraform Destroy"
 terraform destroy -auto-approve
 ./destroy-aws.sh
+echo "::endgroup::"
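
Note on the workflow change above: instead of a fresh `docker run` per step, the job now starts one long-lived container and execs each phase into it, so the regression run and the always-on destroy step share one filesystem (kubeconfig, .terraform, state). A minimal sketch of the pattern, with a hypothetical image name:

# Start a keep-alive container once; "sleep infinity" keeps PID 1 alive.
docker run -d -v "$(pwd)":/app --workdir /app/tests --name ci ghcr.io/example/tools:latest sleep infinity

# Each CI step execs into the same container, inheriting its state.
docker exec --workdir /app/tests ci sh ./run.sh

# Always remove it at the end (the workflow's "if: always()" cleanup step).
docker rm -f ci

The `echo "::group::<title>"` / `echo "::endgroup::"` pairs added throughout are GitHub Actions workflow commands: everything printed between them folds into one collapsible section in the job log.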
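
The middle of the tests/k8s-test.sh hunk (Steps 2-5) did not survive extraction. For orientation only, a sketch of what those steps plausibly contain, assuming the stock EBS CSI smoke-test idiom: the names ebs-sc, test-pvc, and test-app come from the cleanup step in the diff, while the provisioner, storage size, image, TEST_POD_NAME lookup, and echoed text are assumptions, not the file's actual contents.

#!/bin/bash
# Hypothetical reconstruction -- not the committed file.
set -e

# Step 2: Create a StorageClass backed by the EBS CSI driver (assumed manifest)
echo "::group::Creating StorageClass..."
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-sc
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
EOF
echo "::endgroup::"

# Step 3: Claim a volume from that StorageClass (assumed size)
echo "::group::Creating PersistentVolumeClaim..."
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: ebs-sc
  resources:
    requests:
      storage: 1Gi
EOF
echo "::endgroup::"

# Step 4: Run a pod that mounts the claim at /data (assumed image/command)
echo "::group::Creating test Deployment..."
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-app
  template:
    metadata:
      labels:
        app: test-app
    spec:
      containers:
        - name: app
          image: busybox
          command: ["sleep", "3600"]
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: test-pvc
EOF
kubectl wait --for=condition=Available deployment/test-app --timeout=300s
echo "::endgroup::"

# Step 5: Write through the mount and read it back (the "> /data/test.txt"
# fragment and the cat line survive in the original diff)
echo "::group::Testing the EBS-backed volume..."
TEST_POD_NAME=$(kubectl get pods -l app=test-app -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it $TEST_POD_NAME -- sh -c "echo 'hello from EBS' > /data/test.txt"
kubectl exec -it $TEST_POD_NAME -- cat /data/test.txt
echo "::endgroup::"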