Skip to content

Commit

Permalink
Experiment to use github action for e2e
Browse files Browse the repository at this point in the history
Signed-off-by: clyang82 <[email protected]>
  • Loading branch information
clyang82 committed Aug 22, 2024
1 parent 4129196 commit 796133a
Show file tree
Hide file tree
Showing 16 changed files with 704 additions and 21 deletions.
30 changes: 30 additions & 0 deletions .github/workflows/e2e.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# E2E Test workflow: runs the kessel e2e setup on every PR to main
# (and on demand via workflow_dispatch).
name: E2E Test

on:
  workflow_dispatch: {}
  pull_request:
    branches:
      - main

env:
  GO_VERSION: '1.22'
  # NOTE(review): empty and unreferenced by the steps below — presumably
  # consumed by the Makefile; confirm, otherwise remove it.
  GO_REQUIRED_MIN_VERSION: ''

# Least-privilege token: the job only reads the repository contents.
permissions:
  contents: read

jobs:
  e2e:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: install ginkgo
        run: go install github.com/onsi/ginkgo/v2/[email protected]
      - name: Test E2E
        run: |
          make kessel-e2e-setup
9 changes: 9 additions & 0 deletions operator/pkg/constants/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,15 @@ const (

// GHAgentInstallACMHubLabelKey is to indicate whether to install ACM hub on the agent
GHAgentACMHubInstallLabelKey = "global-hub.open-cluster-management.io/hub-cluster-install"

// CommunityCatalogSourceNameKey defines the catalog source name. it is mainly used for deploy kafka in KinD cluster.
CommunityCatalogSourceNameKey = "global-hub.open-cluster-management.io/catalog-source-name"
// CommunityCatalogSourceNamespaceKey defines the catalog source namespace. it is mainly used for deploy kafka in KinD cluster.
CommunityCatalogSourceNamespaceKey = "global-hub.open-cluster-management.io/catalog-source-namespace"
// GHKafkaTLSListener define the tls listener.
// for example: {"authentication": { "type": "tls" }, "configuration": { "bootstrap": { "nodePort": 30095 }
// }, "name": "external", "port": 9095, "tls": true, "type": "nodeport" }
GHKafkaTLSListener = "global-hub.open-cluster-management.io/kafka-tls-listener"
)

// AggregationLevel specifies the level of aggregation leaf hubs should do before sending the information
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -80,11 +80,12 @@ type strimziTransporter struct {
kafkaClusterNamespace string

// subscription properties
subName string
subCommunity bool
subChannel string
subCatalogSourceName string
subPackageName string
subName string
subCommunity bool
subChannel string
subCatalogSourceName string
subCatalogSourceNamespace string
subPackageName string

// global hub config
mgh *operatorv1alpha4.MulticlusterGlobalHub
Expand All @@ -110,11 +111,12 @@ func NewStrimziTransporter(mgr ctrl.Manager, mgh *operatorv1alpha4.MulticlusterG
kafkaClusterName: KafkaClusterName,
kafkaClusterNamespace: mgh.Namespace,

subName: DefaultKafkaSubName,
subCommunity: false,
subChannel: DefaultAMQChannel,
subPackageName: DefaultAMQPackageName,
subCatalogSourceName: DefaultCatalogSourceName,
subName: DefaultKafkaSubName,
subCommunity: false,
subChannel: DefaultAMQChannel,
subPackageName: DefaultAMQPackageName,
subCatalogSourceName: DefaultCatalogSourceName,
subCatalogSourceNamespace: DefaultCatalogSourceNamespace,

waitReady: true,
enableTLS: true,
Expand All @@ -134,6 +136,15 @@ func NewStrimziTransporter(mgr ctrl.Manager, mgh *operatorv1alpha4.MulticlusterG
k.subChannel = CommunityChannel
k.subPackageName = CommunityPackageName
k.subCatalogSourceName = CommunityCatalogSourceName
// it will be operatorhubio-catalog to install kafka in KinD cluster
catalogSourceName, ok := mgh.Annotations[operatorconstants.CommunityCatalogSourceNameKey]
if ok && catalogSourceName != "" {
k.subCatalogSourceName = catalogSourceName
}
catalogSourceNamespace, ok := mgh.Annotations[operatorconstants.CommunityCatalogSourceNamespaceKey]
if ok && catalogSourceNamespace != "" {
k.subCatalogSourceNamespace = catalogSourceNamespace
}
}

if mgh.Spec.AvailabilityConfig == operatorv1alpha4.HABasic {
Expand Down Expand Up @@ -626,6 +637,23 @@ func (k *strimziTransporter) newKafkaCluster(mgh *operatorv1alpha4.MulticlusterG
kafkaSpecZookeeperStorage.Class = &mgh.Spec.DataLayer.StorageClass
}

kafkaTLSListener := kafkav1beta2.KafkaSpecKafkaListenersElem{
Name: "tls",
Port: 9093,
Tls: true,
Type: kafkav1beta2.KafkaSpecKafkaListenersElemTypeRoute,
Authentication: &kafkav1beta2.KafkaSpecKafkaListenersElemAuthentication{
Type: kafkav1beta2.KafkaSpecKafkaListenersElemAuthenticationTypeTls,
},
}
// Get the tls kafka listener if it is defined in annotation. it is only used for tests
listener, ok := mgh.Annotations[operatorconstants.GHKafkaTLSListener]
if ok && listener != "" {
if err := json.Unmarshal([]byte(listener), &kafkaTLSListener); err != nil {
klog.Infof("failed to unmarshal to KafkaSpecKafkaListenersElem: %s", err)
}
}

kafkaCluster := &kafkav1beta2.Kafka{
ObjectMeta: metav1.ObjectMeta{
Name: k.kafkaClusterName,
Expand All @@ -651,15 +679,7 @@ func (k *strimziTransporter) newKafkaCluster(mgh *operatorv1alpha4.MulticlusterG
Tls: false,
Type: kafkav1beta2.KafkaSpecKafkaListenersElemTypeInternal,
},
{
Name: "tls",
Port: 9093,
Tls: true,
Type: kafkav1beta2.KafkaSpecKafkaListenersElemTypeRoute,
Authentication: &kafkav1beta2.KafkaSpecKafkaListenersElemAuthentication{
Type: kafkav1beta2.KafkaSpecKafkaListenersElemAuthenticationTypeTls,
},
},
kafkaTLSListener,
},
Resources: k.getKafkaResources(mgh),
Authorization: &kafkav1beta2.KafkaSpecKafkaAuthorization{
Expand Down Expand Up @@ -981,7 +1001,7 @@ func (k *strimziTransporter) newSubscription(mgh *operatorv1alpha4.MulticlusterG
InstallPlanApproval: DefaultInstallPlanApproval,
Package: k.subPackageName,
CatalogSource: k.subCatalogSourceName,
CatalogSourceNamespace: DefaultCatalogSourceNamespace,
CatalogSourceNamespace: k.subCatalogSourceNamespace,
Config: subConfig,
},
}
Expand Down
3 changes: 3 additions & 0 deletions test/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ e2e-dep:
e2e-setup: tidy vendor e2e-dep
./test/script/e2e_setup.sh

# kessel-e2e-setup: bootstrap the kessel e2e environment (KinD cluster, OLM,
# global hub deployment) via the setup script.
kessel-e2e-setup:
	./test/kessel_e2e/setup/e2e_setup.sh

e2e-cleanup:
./test/script/e2e_cleanup.sh

Expand Down
159 changes: 159 additions & 0 deletions test/kessel_e2e/setup/e2e_setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,159 @@
#!/usr/bin/env bash
# E2E setup for the kessel test suite: creates a KinD cluster, installs the
# OpenShift Route/service-ca CRDs and OLM, then deploys multicluster-global-hub.
# Fix: removed the duplicate shebang — only the first line of a script is a
# shebang; the second `#!/bin/bash` line was dead text.

# -e: abort on first failure; -x: trace commands; -o pipefail: a pipeline
# fails if any stage fails.
set -exo pipefail

CURRENT_DIR=$(cd "$(dirname "$0")" || exit;pwd)
CONFIG_DIR=${CURRENT_DIR}

# initKinDCluster <name> — make sure a KinD cluster called <name> exists; on
# first creation, also write its flattened kubeconfig next to this script.
function initKinDCluster() {
  clusterName="$1"
  # Guard clause: nothing to do when the cluster is already present.
  if kind get clusters | grep -q "^${clusterName}$"; then
    return 0
  fi
  kind create cluster --name "$clusterName" --wait 1m
  currentDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
  kubectl config view --context="kind-${clusterName}" --minify --flatten > "${currentDir}/kubeconfig-${clusterName}"
}

# enableRouter <context> — create the openshift-ingress namespace and install
# the OpenShift Route CRD into the given kubectl context.
enableRouter() {
  local ctx="$1"
  kubectl create ns openshift-ingress --dry-run=client -o yaml | kubectl --context "$ctx" apply -f -
  local routerRepo="https://raw.githubusercontent.com/openshift/router/release-4.16"
  kubectl --context "$ctx" apply -f "$routerRepo/deploy/route_crd.yaml"
  # pacman depends on the Route CRD only; a running router pod is not needed,
  # so router.yaml / router_rbac.yaml are intentionally left unapplied:
  # kubectl apply -f $routerRepo/deploy/router.yaml
  # kubectl apply -f $routerRepo/deploy/router_rbac.yaml
}

# enableServiceCA <context> <cluster-name> — label the KinD control-plane node
# as a master and apply the service-ca CRDs/manifests shipped beside this
# script into the given kubectl context.
enableServiceCA() {
  local ctx="$1"
  local hubName="$2"
  kubectl --context "$ctx" label node "${hubName}-control-plane" node-role.kubernetes.io/master=
  kubectl --context "$ctx" apply -f "${CURRENT_DIR}/service-ca-crds"
  kubectl --context "$ctx" create ns openshift-config-managed
  kubectl --context "$ctx" apply -f "${CURRENT_DIR}/service-ca/"
}

# deploy olm
# enableOLM <context> — install the Operator Lifecycle Manager (v0.28.0) into
# the given kubectl context and wait for the packageserver CSV to succeed.
# Exits non-zero only if the CSV never reaches the Succeeded phase.
function enableOLM() {
  NS=olm
  csvPhase=$(kubectl --context "$1" get csv -n "${NS}" packageserver -o jsonpath='{.status.phase}' 2>/dev/null || echo "Waiting for CSV to appear")
  if [[ "$csvPhase" == "Succeeded" ]]; then
    # Fix: OLM being present is a success condition for the caller. The
    # previous code exited 1 here, which aborted the whole setup under -e.
    echo "OLM is already installed in ${NS} namespace. Skipping..."
    return 0
  fi

  GIT_PATH="https://raw.githubusercontent.com/operator-framework/operator-lifecycle-manager/v0.28.0"
  kubectl --context "$1" apply -f "${GIT_PATH}/deploy/upstream/quickstart/crds.yaml"
  kubectl --context "$1" wait --for=condition=Established -f "${GIT_PATH}/deploy/upstream/quickstart/crds.yaml" --timeout=60s
  kubectl --context "$1" apply -f "${GIT_PATH}/deploy/upstream/quickstart/olm.yaml"

  # apply proxies.config.openshift.io which is required by olm
  kubectl --context "$1" apply -f "https://raw.githubusercontent.com/openshift/api/master/payload-manifests/crds/0000_03_config-operator_01_proxies.crd.yaml"

  # Poll once per second (up to 60 tries) for the packageserver CSV phase.
  retries=60
  csvPhase=$(kubectl --context "$1" get csv -n "${NS}" packageserver -o jsonpath='{.status.phase}' 2>/dev/null || echo "Waiting for CSV to appear")
  while [[ $retries -gt 0 && "$csvPhase" != "Succeeded" ]]; do
    echo "csvPhase: ${csvPhase}"
    sleep 1
    retries=$((retries - 1))
    csvPhase=$(kubectl --context "$1" get csv -n "${NS}" packageserver -o jsonpath='{.status.phase}' 2>/dev/null || echo "Waiting for CSV to appear")
  done
  kubectl --context "$1" rollout status -w deployment/packageserver --namespace="${NS}" --timeout=60s

  # Fix: judge success by the CSV phase, not by the retry counter — the old
  # `retries == 0` check reported failure even when the CSV succeeded on the
  # final retry.
  if [[ "$csvPhase" != "Succeeded" ]]; then
    echo "CSV \"packageserver\" failed to reach phase succeeded"
    exit 1
  fi
  echo "CSV \"packageserver\" install succeeded"
}

# deploy global hub
# deployGlobalHub <context> <kind-cluster-name> — build the operator, manager
# and agent images, load them into the KinD cluster, deploy the global hub
# operator, and create a MulticlusterGlobalHub CR tuned for KinD (community
# Kafka catalog source + nodeport TLS listener annotations).
function deployGlobalHub() {
  # Build all three images from the repository root.
  cd ${CURRENT_DIR}/../../../
  MULTICLUSTER_GLOBAL_HUB_OPERATOR_IMAGE_REF="image-registry.testing/stolostron/multicluster-global-hub-operator:latest"
  MULTICLUSTER_GLOBAL_HUB_MANAGER_IMAGE_REF="image-registry.testing/stolostron/multicluster-global-hub-manager:latest"
  MULTICLUSTER_GLOBAL_HUB_AGENT_IMAGE_REF="image-registry.testing/stolostron/multicluster-global-hub-agent:latest"
  # Fix: the Dockerfile must be passed with -f and the build context as a
  # separate argument. `docker build -t X operator/Dockerfile` treats the
  # Dockerfile path itself as the context directory and fails.
  # NOTE(review): assumes each Dockerfile expects the repo root as context —
  # confirm against the project's existing image build targets.
  docker build -t $MULTICLUSTER_GLOBAL_HUB_OPERATOR_IMAGE_REF -f operator/Dockerfile .
  docker build -t $MULTICLUSTER_GLOBAL_HUB_MANAGER_IMAGE_REF -f manager/Dockerfile .
  docker build -t $MULTICLUSTER_GLOBAL_HUB_AGENT_IMAGE_REF -f agent/Dockerfile .

  # Load the freshly built images into the KinD cluster ($2).
  kind load docker-image $MULTICLUSTER_GLOBAL_HUB_OPERATOR_IMAGE_REF --name $2
  kind load docker-image $MULTICLUSTER_GLOBAL_HUB_MANAGER_IMAGE_REF --name $2
  kind load docker-image $MULTICLUSTER_GLOBAL_HUB_AGENT_IMAGE_REF --name $2

  # Deploy the global hub operator, then apply the CR. The annotations point
  # the Kafka subscription at the operatorhubio catalog in the `olm` namespace
  # and replace the default TLS listener with a nodeport one so tests outside
  # the cluster can reach Kafka on port 30095.
  cd operator; make deploy IMG=$MULTICLUSTER_GLOBAL_HUB_OPERATOR_IMAGE_REF
  cat <<EOF | kubectl --context "$1" apply -f -
apiVersion: operator.open-cluster-management.io/v1alpha4
kind: MulticlusterGlobalHub
metadata:
  annotations:
    global-hub.open-cluster-management.io/catalog-source-name: operatorhubio-catalog
    global-hub.open-cluster-management.io/catalog-source-namespace: olm
    global-hub.open-cluster-management.io/kafka-tls-listener: |
      {"authentication": { "type": "tls" }, "configuration": { "bootstrap": { "nodePort": 30095 } }, "name": "external", "port": 9095, "tls": true, "type": "nodeport" }
  name: multiclusterglobalhub
  namespace: multicluster-global-hub
spec:
  availabilityConfig: High
  dataLayer:
    kafka:
      topics:
        specTopic: gh-spec
        statusTopic: gh-event.*
    postgres:
      retention: 18m
  enableMetrics: false
EOF
}

# wait_cmd <command> [timeout-seconds] — poll <command> until it succeeds or
# the timeout (default 600s) elapses, printing an animated spinner line.
# Returns 0 on success, 1 on timeout.
#
# NOTE(review): the initial check uses the command's exit status, but inside
# the loop success is judged by non-empty stdout ([ -n "$(eval ...)" ]) — the
# two conditions differ; confirm which is intended.
# NOTE(review): CYAN / NC / RED are referenced but never defined in this
# script — presumably expected from the caller's environment; verify.
wait_cmd() {
  local command=$1
  local seconds=${2:-"600"}
  local interval=2 # Interval for updating the waiting message
  local command_interval=4 # Interval for executing the command
  local signs=(🙉 🙈 🙊)
  local elapsed=0
  local last_command_run=0

  echo -e "\r${CYAN}$1 $NC "
  # Fast path: return immediately if the command already passes.
  if eval "${command}"; then
    return 0
  fi

  while [ $elapsed -le "$seconds" ]; do
    # Re-run the command at most once every $command_interval seconds.
    if [ $((elapsed - last_command_run)) -ge $command_interval ]; then
      if [ -n "$(eval ${command})" ]; then
        return 0 # Return success status code
      fi
      last_command_run=$elapsed
    fi

    if [ $elapsed -eq 0 ]; then
      echo -e "\r placeholder will be overwrite by wating message"
    fi
    # Rotate the emoji spinner based on elapsed time.
    local index=$((elapsed / interval % ${#signs[@]}))
    echo -ne "\r ${signs[$index]} Waiting $elapsed seconds: $1"
    sleep $interval
    ((elapsed += interval))
  done

  echo -e "\n$RED Timeout $seconds seconds $NC: $command"
  return 1 # Return failure status code
}


# wait_global_hub_ready <context> — block until the global-hub manager
# deployment exists, then wait for it to report Available (600s budget).
function wait_global_hub_ready() {
  local deploy="deploy/multicluster-global-hub-manager"
  local ns="multicluster-global-hub"
  wait_cmd "kubectl get ${deploy} -n ${ns} --context $1"
  kubectl wait "${deploy}" -n "${ns}" --for condition=Available=True --timeout=600s --context "$1"
}

# --- main ---
# Bring up one KinD cluster named "global-hub" (kubectl context
# "kind-global-hub") and deploy the full global hub stack on top of it.
initKinDCluster global-hub
enableRouter kind-global-hub
enableServiceCA kind-global-hub global-hub
enableOLM kind-global-hub
deployGlobalHub kind-global-hub global-hub
wait_global_hub_ready kind-global-hub
Loading

0 comments on commit 796133a

Please sign in to comment.