K8SPS-288 - async self healing test #428
base: main
Conversation
Remaining comments which cannot be posted as a review comment to avoid GitHub Rate Limit
shfmt
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 239 to 241 in 1b9dcdf
local command="$1"
local uri="$2"
local pod="$3"
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 243 to 244 in 1b9dcdf
client_pod=$(get_client_pod)
wait_pod $client_pod 1>&2
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 246 to 248 in 1b9dcdf
kubectl -n "${NAMESPACE}" exec "${pod:-mysql-client}" -- \
	bash -c "printf '%s\n' \"${command}\" | mysqlsh --sql --quiet-start=2 $uri" 2>&1 |
	tail -n +2
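Most of these findings are stylistic: shfmt objects to pipes that trail a wrapped line. A minimal before/after sketch of that rule (assuming the project runs shfmt with the -bn option, which wants binary operators such as | to start the continuation line; the command here is illustrative):

# flagged style: the pipe trails the wrapped line
kubectl get pods 2>&1 |
	tail -n +2

# shfmt -bn style: the pipe leads the continuation line
kubectl get pods 2>&1 \
	| tail -n +2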
[shfmt] reported by reviewdog 🐶
kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c "curl -s -k $*" |
[shfmt] reported by reviewdog 🐶
echo $(get_cluster_name) | tr -cd '[^a-zA-Z0-9_]+' |
[shfmt] reported by reviewdog 🐶
local idx=${1:-0} |
[shfmt] reported by reviewdog 🐶
echo "root:root_password@$(get_cluster_name)-mysql-${idx}.$(get_cluster_name)-mysql.${NAMESPACE}" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 266 to 267 in 1b9dcdf
local uri="$1" | |
local pod="$2" |
[shfmt] reported by reviewdog 🐶
client_pod=$(get_client_pod) |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 271 to 273 in 1b9dcdf
kubectl -n "${NAMESPACE}" exec "${pod:-mysql-client}" -- mysqlsh --uri $uri --cluster --result-format json -- cluster status | | |
sed -e 's/mysql: //' | | |
(grep -v 'Using a password on the command line interface can be insecure.' || :) |
[shfmt] reported by reviewdog 🐶
kubectl -n "${NAMESPACE}" get ps -o jsonpath='{.items[0].metadata.name}' |
[shfmt] reported by reviewdog 🐶
local cluster=$1 |
[shfmt] reported by reviewdog 🐶
echo "${cluster}-mysql" |
[shfmt] reported by reviewdog 🐶
local cluster=$1 |
[shfmt] reported by reviewdog 🐶
echo "${cluster}-router" |
[shfmt] reported by reviewdog 🐶
local cluster=$1 |
[shfmt] reported by reviewdog 🐶
echo "${cluster}-haproxy" |
[shfmt] reported by reviewdog 🐶
local cluster=$1 |
[shfmt] reported by reviewdog 🐶
echo "${cluster}-orc" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 305 to 306 in 1b9dcdf
local cluster=$1 | |
local index=$2 |
[shfmt] reported by reviewdog 🐶
echo "${cluster}-mysql-${index}.${cluster}-mysql" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 312 to 313 in 1b9dcdf
local cluster=$1 | |
local index=$2 |
[shfmt] reported by reviewdog 🐶
echo "${cluster}-orc-${index}.${cluster}-orc" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 319 to 335 in 1b9dcdf
local metric=$1 | |
local instance=$2 | |
local user_pass=$3 | |
local start=$($date -u "+%s" -d "-1 minute") | |
local end=$($date -u "+%s") | |
set +o xtrace | |
retry=0 | |
until run_curl "https://${user_pass}@monitoring-service/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28$metric%7Bnode_name%3D%7E%22$instance%22%7d%20or%20$metric%7Bnode_name%3D%7E%22$instance%22%7D%29&start=$start&end=$end&step=60" | jq '.data.result[0].values[][1]' | grep '^"[0-9]*"$'; do | |
sleep 1 | |
let retry+=1 | |
if [ $retry -ge 30 ]; then | |
echo "Max retry count $retry reached. Data about instance $instance was not collected!" | |
exit 1 | |
fi | |
done | |
set -o xtrace |
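For orientation, a hypothetical call to this PMM metric poller (the function name and the admin:admin credentials are illustrative; the scrape omits the function header):

# illustrative only: poll PMM for a load metric on the first mysql pod
get_metric_values node_load1 "$(get_cluster_name)-mysql-0" "admin:admin"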
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 339 to 346 in 1b9dcdf
local instance=$1
local user_pass=$2
local start=$($date -u "+%Y-%m-%dT%H:%M:%S" -d "-30 minute")
local end=$($date -u "+%Y-%m-%dT%H:%M:%S")
local endpoint=monitoring-service
local payload=$(
	cat <<EOF
[shfmt] reported by reviewdog 🐶
)
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 372 to 373 in 1b9dcdf
run_curl -XPOST -d "'$(echo ${payload} | sed 's/\n//g')'" "https://${user_pass}@${endpoint}/v0/qan/GetReport" |
	jq '.rows[].fingerprint'
[shfmt] reported by reviewdog 🐶
kubectl get pod -n "${NAMESPACE}" --no-headers --selector=app.kubernetes.io/component=mysql | awk '{print $1}'
[shfmt] reported by reviewdog 🐶
kubectl get pod -n "${NAMESPACE}" --no-headers --selector=app.kubernetes.io/component=router | awk '{print $1}'
[shfmt] reported by reviewdog 🐶
local args=$1
[shfmt] reported by reviewdog 🐶
run_mysql "SELECT user FROM mysql.user" "${args}" | grep -vE "mysql|root"
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 391 to 404 in 1b9dcdf
local service=$1
while (kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do
	sleep 1
done
if [ "$(kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}')" = "ClusterIP" ]; then
	kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.clusterIP}'
	return
fi
until kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>&1 | egrep -q "hostname|ip"; do
	sleep 1
done
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
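A sketch of how this service-address helper is typically invoked (the function name is an assumption, since the snippet omits the header; it returns the ClusterIP or the LoadBalancer ingress address, whichever applies):

# illustrative: resolve the HAProxy service address regardless of service type
get_service_ip "$(get_cluster_name)-haproxy"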
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 408 to 410 in 1b9dcdf
local cluster_name=${1}
local cluster_size=${2}
local orc_size=${3}
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 412 to 414 in 1b9dcdf
if [ -z "${orc_size}" ]; then
	orc_size=3
fi
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 416 to 424 in 1b9dcdf
sleep 7 # wait for two reconcile loops ;) 3 sec x 2 times + 1 sec = 7 seconds
until [[ "$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.state}')" == "ready" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.ready}')" == "${cluster_size}" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.orchestrator.ready}')" == "${orc_size}" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.orchestrator.state}')" == "ready" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.state}')" == "ready" ]]; do
	echo 'waiting for cluster readiness (async)'
	sleep 15
done
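A sketch of how this readiness gate is driven (the function name and sizes are assumptions; the scraped snippets omit the function headers):

# illustrative: block until 3 mysql pods and 3 orchestrator pods report ready
wait_cluster_consistency_async "$(get_cluster_name)" 3 3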
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 428 to 430 in 1b9dcdf
local cluster_name=${1}
local cluster_size=${2}
local router_size=${3}
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 432 to 434 in 1b9dcdf
if [ -z "${router_size}" ]; then
	router_size=3
fi
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 436 to 444 in 1b9dcdf
sleep 7 # wait for two reconcile loops ;) 3 sec x 2 times + 1 sec = 7 seconds
until [[ "$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.state}')" == "ready" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.ready}')" == "${cluster_size}" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.router.ready}')" == "${router_size}" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.router.state}')" == "ready" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.state}')" == "ready" ]]; do
	echo 'waiting for cluster readiness (group replication)'
	sleep 15
done
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 448 to 470 in 1b9dcdf
local pod=$1
set +o xtrace
retry=0
echo -n $pod
until kubectl get pod/$pod -n "${NAMESPACE}" -o jsonpath='{.status.containerStatuses[0].ready}' 2>/dev/null | grep 'true'; do
	sleep 1
	echo -n .
	let retry+=1
	if [ $retry -ge 360 ]; then
		kubectl describe pod/$pod -n "${NAMESPACE}"
		kubectl logs $pod -n "${NAMESPACE}"
		kubectl logs $(get_operator_pod) -n "${OPERATOR_NS:-$NAMESPACE}" |
			grep -v 'level=info' |
			grep -v 'level=debug' |
			grep -v 'Getting tasks for pod' |
			grep -v 'Getting pods from source' |
			tail -100
		echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
		exit 1
	fi
done
set -o xtrace
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 474 to 497 in 1b9dcdf
local name=$1
local target_namespace=${2:-"$namespace"}
sleep 10
set +o xtrace
retry=0
echo -n $name
until [ -n "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.replicas}')" \
	-a "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.replicas}')" \
	== "$(kubectl -n ${target_namespace} get deployment $name -o jsonpath='{.status.readyReplicas}')" ]; do
	sleep 1
	echo -n .
	let retry+=1
	if [ $retry -ge 360 ]; then
		kubectl logs $(get_operator_pod) -c operator |
			grep -v 'level=info' |
			grep -v 'level=debug' |
			tail -100
		echo max retry count $retry reached. something went wrong with operator or kubernetes cluster
		exit 1
	fi
done
echo
set -o xtrace
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 501 to 535 in 1b9dcdf
local RAM_SIZE=$1
local RDS_MEM_INSTANCE=12582880
local CUSTOM_INNODB_SIZE=$2
local CUSTOM_CONNECTIONS=$3
local INNODB_SIZE=$(run_mysql \
	'SELECT @@innodb_buffer_pool_size;' \
	"-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
local CONNECTIONS=$(run_mysql \
	'SELECT @@max_connections;' \
	"-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password")
if [[ -n ${CUSTOM_INNODB_SIZE} ]]; then
	if [[ ${INNODB_SIZE} != ${CUSTOM_INNODB_SIZE} ]]; then
		echo "innodb_buffer_pool_size is set to ${INNODB_SIZE}, which does not correlate with ${CUSTOM_INNODB_SIZE} from custom config"
		exit 1
	fi
else
	if [[ ${INNODB_SIZE} != $((RAM_SIZE * 50 / 100)) ]]; then
		echo "innodb_buffer_pool_size is set to ${INNODB_SIZE}, which does not correlate with cr.pxc.limits.memory * 0.5"
		exit 1
	fi
fi
if [[ -n ${CUSTOM_CONNECTIONS} ]]; then
	if [[ ${CONNECTIONS} != ${CUSTOM_CONNECTIONS} ]]; then
		echo "max_connections is set to ${CONNECTIONS}, which does not correlate with ${CUSTOM_CONNECTIONS} from custom config"
		exit 1
	fi
else
	if [[ ${CONNECTIONS} != $((RAM_SIZE / RDS_MEM_INSTANCE)) ]]; then
		echo "max_connections is set to ${CONNECTIONS}, which does not correlate with cr.pxc.limits.memory / ${RDS_MEM_INSTANCE}"
		exit 1
	fi
fi
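As a worked example of the defaults this check encodes (values illustrative): with a 2Gi memory limit, the expected innodb_buffer_pool_size is 50% of the limit and max_connections is the limit divided by RDS_MEM_INSTANCE:

# illustrative arithmetic only
RAM_SIZE=2147483648                # a 2Gi memory limit
echo $((RAM_SIZE * 50 / 100))      # 1073741824, the expected buffer pool size
echo $((RAM_SIZE / 12582880))      # 170, the expected max_connections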
[shfmt] reported by reviewdog 🐶
local cluster=$1
[shfmt] reported by reviewdog 🐶
echo "${cluster}-router"
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 545 to 547 in 1b9dcdf
kubectl create configmap -n "${OPERATOR_NS:-$NAMESPACE}" versions \
	--from-file "${TESTS_CONFIG_DIR}/operator.9.9.9.ps-operator.dep.json" \
	--from-file "${TESTS_CONFIG_DIR}/operator.9.9.9.ps-operator.json"
[shfmt] reported by reviewdog 🐶
kubectl apply -n "${OPERATOR_NS:-$NAMESPACE}" -f "${TESTS_CONFIG_DIR}/vs.yaml"
[shfmt] reported by reviewdog 🐶
sleep 5
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 555 to 557 in 1b9dcdf
kubectl create namespace cert-manager || :
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true || :
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml --validate=false || : 2>/dev/null
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 561 to 562 in 1b9dcdf
kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml --validate=false || : 2>/dev/null
kubectl delete --grace-period=0 --force=true namespace cert-manager
[shfmt] reported by reviewdog 🐶
kubectl -n "${NAMESPACE}" get pods -l mysql.percona.com/primary=true -ojsonpath="{.items[0].metadata.name}"
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 570 to 571 in 1b9dcdf
local haproxy_pod=$1
local haproxy_pod_ip=$(kubectl -n "${NAMESPACE}" get pods ${haproxy_pod} -o jsonpath="{.status.podIP}")
[shfmt] reported by reviewdog 🐶
run_mysql "SHOW VARIABLES LIKE '%hostname%';" "-h ${haproxy_pod_ip} -P3306 -uroot -proot_password" | awk '{print $2}'
[shfmt] reported by reviewdog 🐶
run_mysql "SELECT MEMBER_HOST FROM performance_schema.replication_group_members where MEMBER_ROLE='PRIMARY';" "-h $(get_mysql_router_service $(get_cluster_name)) -P 6446 -uroot -proot_password" | cut -d'.' -f1
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 581 to 584 in 1b9dcdf
local certificate=$1
local expected_sans=$2
local have=$(mktemp)
local want=$(mktemp)
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 586 to 587 in 1b9dcdf
kubectl -n "${NAMESPACE}" get certificate "${certificate}" -o jsonpath='{.spec.dnsNames}' | jq '.' >"${have}"
echo "${expected_sans}" | jq '.' >"${want}"
[shfmt] reported by reviewdog 🐶
diff "${have}" "${want}"
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 593 to 630 in 1b9dcdf
local secrets
local passwords
local pods
secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value')
passwords="$(for i in $secrets; do
	base64 -d <<<$i
	echo
done) $secrets"
pods=$(kubectl -n "${NAMESPACE}" get pods -o name | awk -F "/" '{print $2}')
collect_logs() {
	local containers
	local count
	NS=$1
	for p in $pods; do
		local containers=$(kubectl -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
		for c in $containers; do
			kubectl -n "$NS" logs $p -c $c >${TEMP_DIR}/logs_output-$p-$c.txt
			echo logs saved in: ${TEMP_DIR}/logs_output-$p-$c.txt
			for pass in $passwords; do
				local count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
				if [[ $count != 0 ]]; then
					echo leaked passwords are found in log ${TEMP_DIR}/logs_output-$p-$c.txt
					false
				fi
			done
		done
		echo
	done
}
collect_logs $NAMESPACE
if [ -n "$OPERATOR_NS" ]; then
	local pods=$(kubectl -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
	collect_logs $OPERATOR_NS
fi
[shfmt] reported by reviewdog 🐶
destroy_chaos_mesh
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 636 to 642 in 1b9dcdf
helm repo add chaos-mesh https://charts.chaos-mesh.org
if [ -n "${MINIKUBE}" ]; then
	helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=docker --set dashboard.create=false --version 2.5.1
else
	helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
fi
sleep 10
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 646 to 666 in 1b9dcdf
local chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh | tail -n1 | awk -F' ' '{print $2}' | sed 's/NAMESPACE//')
if [ -n "${chaos_mesh_ns}" ]; then
	helm uninstall --wait --timeout 60s chaos-mesh --namespace ${chaos_mesh_ns} || :
fi
timeout 30 kubectl delete MutatingWebhookConfiguration $(kubectl get MutatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'validate-auth' | awk '{print $1}') || :
for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
	kubectl get ${i} --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace |
		while read -r line; do
			local kind=$(echo "$line" | awk '{print $1}')
			local name=$(echo "$line" | awk '{print $2}')
			local namespace=$(echo "$line" | awk '{print $3}')
			kubectl patch $kind $name -n "${namespace}" --type=merge -p '{"metadata":{"finalizers":[]}}' || :
		done
	timeout 30 kubectl delete ${i} --all --all-namespaces || :
done
timeout 30 kubectl delete crd $(kubectl get crd | grep 'chaos-mesh.org' | awk '{print $1}') || :
timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'chaos-mesh' | awk '{print $1}') || :
timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep 'chaos-mesh' | awk '{print $1}') || :
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 670 to 677 in 1b9dcdf
local ns=$1
local selector=$2
local pod_label=$3
local label_value=$4
local chaos_suffix=$5
if [ "${selector}" == "pod" ]; then
	yq eval '
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 680 to 683 in 1b9dcdf
		.spec.selector.pods.'${ns}'[0] = "'${pod_label}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml |
		kubectl apply --namespace ${ns} -f -
elif [ "${selector}" == "label" ]; then
	yq eval '
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 687 to 690 in 1b9dcdf
		.spec.selector.labelSelectors."'${pod_label}'" = "'${label_value}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml |
		kubectl apply --namespace ${ns} -f -
fi
sleep 5
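These chaos helpers accept either a pod name or a label selector; a hypothetical invocation (the function name is an assumption, since the scraped snippets omit the headers):

# illustrative: kill one pod by name, or every pod matching a label
kill_pods "${NAMESPACE}" pod "example-mysql-0" "" "kill-by-name"
kill_pods "${NAMESPACE}" label "app.kubernetes.io/component" "mysql" "kill-by-label"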
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 694 to 696 in 1b9dcdf
local ns=$1
local pod=$2
local chaos_suffix=$3
[shfmt] reported by reviewdog 🐶
yq eval '
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 701 to 703 in 1b9dcdf
	.spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-pod-failure.yml |
	kubectl apply --namespace ${ns} -f -
sleep 5
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 707 to 709 in 1b9dcdf
local ns=$1
local pod=$2
local chaos_suffix=$3
[shfmt] reported by reviewdog 🐶
yq eval '
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 714 to 716 in 1b9dcdf
	.spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-network-loss.yml |
	kubectl apply --namespace ${ns} -f -
sleep 5
[shfmt] reported by reviewdog 🐶
certificate="$1" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 722 to 723 in 1b9dcdf
local pod_name | |
pod_name=$(kubectl get -n "${NAMESPACE}" pods --selector=name=cmctl -o 'jsonpath={.items[].metadata.name}') |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 725 to 726 in 1b9dcdf
local revision | |
revision=$(kubectl get -n "${NAMESPACE}" certificate "$certificate" -o 'jsonpath={.status.revision}') |
[shfmt] reported by reviewdog 🐶
kubectl exec -n "${NAMESPACE}" "$pod_name" -- /tmp/cmctl renew "$certificate" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 730 to 738 in 1b9dcdf
# wait for new revision | |
for i in {1..10}; do | |
local new_revision | |
new_revision=$(kubectl get -n "${NAMESPACE}" certificate "$certificate" -o 'jsonpath={.status.revision}') | |
if [ "$((revision + 1))" == "$new_revision" ]; then | |
break | |
fi | |
sleep 1 | |
done |
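The renewal flow reads the certificate's current .status.revision, triggers cmctl renew, then polls until the revision increments. A hypothetical call (function and certificate names are illustrative):

# illustrative: force-renew the cluster TLS certificate and wait for revision+1
renew_certificate "$(get_cluster_name)-ssl"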
[shfmt] reported by reviewdog 🐶
local service_account="cmctl" |
[shfmt] reported by reviewdog 🐶
percona-server-mysql-operator/e2e-tests/functions
Lines 744 to 747 in 1b9dcdf
sed -e "s/percona-server-mysql-operator/$service_account/g" "${DEPLOY_DIR}/rbac.yaml" | | |
yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' | | |
kubectl apply -n "${NAMESPACE}" -f - | |
kubectl apply -n "${NAMESPACE}" -f "${TESTS_CONFIG_DIR}/cmctl.yml" |
kubectl -n "${OPERATOR_NS:-$NAMESPACE}" delete deployment percona-server-mysql-operator --force --grace-period=0 || true | ||
if [[ $OPERATOR_NS ]]; then | ||
kubectl delete namespace $OPERATOR_NS --force --grace-period=0 || true | ||
fi |
[shfmt] reported by reviewdog 🐶
}

deploy_non_tls_cluster_secrets() {
	kubectl -n "${NAMESPACE}" apply -f "${TESTS_CONFIG_DIR}/secrets.yaml"
[shfmt] reported by reviewdog 🐶
}

deploy_tls_cluster_secrets() {
	kubectl -n "${NAMESPACE}" apply -f "${TESTS_CONFIG_DIR}/ssl-secret.yaml"
[shfmt] reported by reviewdog 🐶
}

deploy_client() {
	kubectl -n "${NAMESPACE}" apply -f "${TESTS_CONFIG_DIR}/client.yaml"
[shfmt] reported by reviewdog 🐶
kubectl -n "${NAMESPACE}" apply -f "${TESTS_CONFIG_DIR}/minio-secret.yml" | ||
kubectl -n "${NAMESPACE}" apply -f "${TESTS_CONFIG_DIR}/cloud-secret.yml" |
[shfmt] reported by reviewdog 🐶
local name_suffix=$1

yq eval "$(printf '.metadata.name="%s"' "${test_name}${name_suffix:+-$name_suffix}")" "${DEPLOY_DIR}/cr.yaml" |
	yq eval "$(printf '.spec.initImage="%s"' "${IMAGE}")" - |
	yq eval '.spec.secretsName="test-secrets"' - |
	yq eval '.spec.sslSecretName="test-ssl"' - |
	yq eval '.spec.upgradeOptions.apply="disabled"' - |
	yq eval '.spec.mysql.clusterType="async"' - |
	yq eval "$(printf '.spec.mysql.image="%s"' "${IMAGE_MYSQL}")" - |
	yq eval "$(printf '.spec.backup.image="%s"' "${IMAGE_BACKUP}")" - |
	yq eval "$(printf '.spec.orchestrator.image="%s"' "${IMAGE_ORCHESTRATOR}")" - |
	yq eval "$(printf '.spec.proxy.router.image="%s"' "${IMAGE_ROUTER}")" - |
	yq eval "$(printf '.spec.toolkit.image="%s"' "${IMAGE_TOOLKIT}")" - |
	yq eval "$(printf '.spec.proxy.haproxy.image="%s"' "${IMAGE_HAPROXY}")" - |
	yq eval "$(printf '.spec.pmm.image="%s"' "${IMAGE_PMM_CLIENT}")" - |
	if [ -n "${MINIKUBE}" ]; then
		yq eval '(.. | select(has("antiAffinityTopologyKey")).antiAffinityTopologyKey) |= "none"' - |
			yq eval '.spec.proxy.haproxy.resources.requests.cpu="300m"' -
	else
		yq eval -
	fi
[shfmt] reported by reviewdog 🐶
Suggested change:
local name_suffix=$1
yq eval "$(printf '.metadata.name="%s"' "${test_name}${name_suffix:+-$name_suffix}")" "${DEPLOY_DIR}/cr.yaml" \
	| yq eval "$(printf '.spec.initImage="%s"' "${IMAGE}")" - \
	| yq eval '.spec.secretsName="test-secrets"' - \
	| yq eval '.spec.sslSecretName="test-ssl"' - \
	| yq eval '.spec.upgradeOptions.apply="disabled"' - \
	| yq eval '.spec.mysql.clusterType="async"' - \
	| yq eval "$(printf '.spec.mysql.image="%s"' "${IMAGE_MYSQL}")" - \
	| yq eval "$(printf '.spec.backup.image="%s"' "${IMAGE_BACKUP}")" - \
	| yq eval "$(printf '.spec.orchestrator.image="%s"' "${IMAGE_ORCHESTRATOR}")" - \
	| yq eval "$(printf '.spec.proxy.router.image="%s"' "${IMAGE_ROUTER}")" - \
	| yq eval "$(printf '.spec.toolkit.image="%s"' "${IMAGE_TOOLKIT}")" - \
	| yq eval "$(printf '.spec.proxy.haproxy.image="%s"' "${IMAGE_HAPROXY}")" - \
	| yq eval "$(printf '.spec.pmm.image="%s"' "${IMAGE_PMM_CLIENT}")" - \
	| if [ -n "${MINIKUBE}" ]; then
		yq eval '(.. | select(has("antiAffinityTopologyKey")).antiAffinityTopologyKey) |= "none"' - \
			| yq eval '.spec.proxy.haproxy.resources.requests.cpu="300m"' -
	else
		yq eval -
	fi
kubectl -n "${NAMESPACE}" get pods \ | ||
--selector=name=mysql-client \ | ||
-o 'jsonpath={.items[].metadata.name}' |
[shfmt] reported by reviewdog 🐶
local command="$1" | ||
local uri="$2" | ||
local pod="$3" |
[shfmt] reported by reviewdog 🐶
client_pod=$(get_client_pod)
wait_pod $client_pod 1>&2
[shfmt] reported by reviewdog 🐶
kubectl -n "${NAMESPACE}" exec "${pod:-mysql-client}" -- \ | ||
bash -c "printf '%s\n' \"${command}\" | mysql -sN $uri" 2>&1 | | ||
sed -e 's/mysql: //' | | ||
(grep -v 'Using a password on the command line interface can be insecure.' || :) |
[shfmt] reported by reviewdog 🐶
Suggested change:
kubectl -n "${NAMESPACE}" exec "${pod:-mysql-client}" -- \
	bash -c "printf '%s\n' \"${command}\" | mysql -sN $uri" 2>&1 \
	| sed -e 's/mysql: //' \
	| (grep -v 'Using a password on the command line interface can be insecure.' || :)
local command="$1" | ||
local uri="$2" | ||
local pod="$3" |
[shfmt] reported by reviewdog 🐶
client_pod=$(get_client_pod)
wait_pod $client_pod 1>&2
[shfmt] reported by reviewdog 🐶
kubectl -n "${NAMESPACE}" exec "${pod:-mysql-client}" -- \ | ||
bash -c "printf '%s\n' \"${command}\" | mysqlsh --sql --quiet-start=2 $uri" 2>&1 | | ||
tail -n +2 |
[shfmt] reported by reviewdog 🐶
Suggested change:
kubectl -n "${NAMESPACE}" exec "${pod:-mysql-client}" -- \
	bash -c "printf '%s\n' \"${command}\" | mysqlsh --sql --quiet-start=2 $uri" 2>&1 \
	| tail -n +2
}

run_curl() {
	kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c "curl -s -k $*"
[shfmt] reported by reviewdog 🐶
}

get_innodb_cluster_name() {
	echo $(get_cluster_name) | tr -cd '[^a-zA-Z0-9_]+'
[shfmt] reported by reviewdog 🐶
@@ -368,382 +367,382 @@ get_qan20_values() {
	"period_start_to": "$($date -u '+%Y-%m-%dT%H:%M:%S%:z')"
}
EOF
)
[shfmt] reported by reviewdog 🐶
run_curl -XPOST -d "'$(echo ${payload} | sed 's/\n//g')'" "https://${user_pass}@${endpoint}/v0/qan/GetReport" |
	jq '.rows[].fingerprint'
[shfmt] reported by reviewdog 🐶
Suggested change:
run_curl -XPOST -d "'$(echo ${payload} | sed 's/\n//g')'" "https://${user_pass}@${endpoint}/v0/qan/GetReport" \
	| jq '.rows[].fingerprint'
}

get_mysql_pods() {
	kubectl get pod -n "${NAMESPACE}" --no-headers --selector=app.kubernetes.io/component=mysql | awk '{print $1}'
[shfmt] reported by reviewdog 🐶
}

get_router_pods() {
	kubectl get pod -n "${NAMESPACE}" --no-headers --selector=app.kubernetes.io/component=router | awk '{print $1}'
[shfmt] reported by reviewdog 🐶
}

get_mysql_users() {
	local args=$1
[shfmt] reported by reviewdog 🐶

run_mysql "SELECT user FROM mysql.user" "${args}" | grep -vE "mysql|root"
[shfmt] reported by reviewdog 🐶
local service=$1

while (kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do
	sleep 1
done
if [ "$(kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}')" = "ClusterIP" ]; then
	kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.clusterIP}'
	return
fi
until kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>&1 | egrep -q "hostname|ip"; do
	sleep 1
done
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
[shfmt] reported by reviewdog 🐶
local service=$1 | |
while (kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do | |
sleep 1 | |
done | |
if [ "$(kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}')" = "ClusterIP" ]; then | |
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.clusterIP}' | |
return | |
fi | |
until kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>&1 | egrep -q "hostname|ip"; do | |
sleep 1 | |
done | |
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}' | |
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' | |
local service=$1 | |
while (kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 || :) | grep -q NotFound; do | |
sleep 1 | |
done | |
if [ "$(kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.type}')" = "ClusterIP" ]; then | |
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.spec.clusterIP}' | |
return | |
fi | |
until kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[]}' 2>&1 | egrep -q "hostname|ip"; do | |
sleep 1 | |
done | |
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}' | |
kubectl get service/$service -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' |
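Assuming the snippet above is the body of a get_service_ip() helper (the name is not shown in this diff), a call site might look like:

# hypothetical usage; prints a ClusterIP, or a LoadBalancer IP/hostname
ip=$(get_service_ip "$(get_cluster_name)-haproxy")
echo "connecting to ${ip}:3306"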
[shfmt] reported by reviewdog 🐶
	local cluster_name=${1}
	local cluster_size=${2}
	local orc_size=${3}
[shfmt] reported by reviewdog 🐶
	if [ -z "${orc_size}" ]; then
		orc_size=3
	fi
[shfmt] reported by reviewdog 🐶
	sleep 7 # wait for two reconcile loops ;) 3 sec x 2 times + 1 sec = 7 seconds
	until [[ "$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.state}')" == "ready" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.mysql.ready}')" == "${cluster_size}" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.orchestrator.ready}')" == "${orc_size}" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.orchestrator.state}')" == "ready" &&
	"$(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.state}')" == "ready" ]]; do
		echo 'waiting for cluster readiness (async)'
		sleep 15
	done
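Each condition in the loop is an independent kubectl probe against the ps custom resource; for example, checking only the overall state:

# returns "ready" once the operator reports the whole cluster healthy
kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o jsonpath='{.status.state}'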
[shfmt] reported by reviewdog 🐶
	local secrets
	local passwords
	local pods

	secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value')
	passwords="$(for i in $secrets; do
		base64 -d <<<$i
		echo
	done) $secrets"
	pods=$(kubectl -n "${NAMESPACE}" get pods -o name | awk -F "/" '{print $2}')

	collect_logs() {
		local containers
		local count

		NS=$1
		for p in $pods; do
			local containers=$(kubectl -n "$NS" get pod $p -o jsonpath='{.spec.containers[*].name}')
			for c in $containers; do
				kubectl -n "$NS" logs $p -c $c >${TEMP_DIR}/logs_output-$p-$c.txt
				echo logs saved in: ${TEMP_DIR}/logs_output-$p-$c.txt
				for pass in $passwords; do
					local count=$(grep -c --fixed-strings -- "$pass" ${TEMP_DIR}/logs_output-$p-$c.txt || :)
					if [[ $count != 0 ]]; then
						echo leaked passwords are found in log ${TEMP_DIR}/logs_output-$p-$c.txt
						false
					fi
				done
			done
			echo
		done
	}

	collect_logs $NAMESPACE
	if [ -n "$OPERATOR_NS" ]; then
		local pods=$(kubectl -n "${OPERATOR_NS}" get pods -o name | awk -F "/" '{print $2}')
		collect_logs $OPERATOR_NS
	fi
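The leak check reduces to one fixed-string grep per decoded secret; a self-contained sketch with made-up values:

# illustrative only: flags the log if the decoded password shows up in it
pass=$(echo 'c2VjcmV0' | base64 -d) # decodes to "secret"
echo "connecting with secret" >/tmp/example.log
count=$(grep -c --fixed-strings -- "$pass" /tmp/example.log || :)
[[ $count != 0 ]] && echo "leaked password in /tmp/example.log"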
}

deploy_chaos_mesh() {
	destroy_chaos_mesh
	helm repo add chaos-mesh https://charts.chaos-mesh.org
	if [ -n "${MINIKUBE}" ]; then
		helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=docker --set dashboard.create=false --version 2.5.1
	else
		helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${NAMESPACE} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
	fi
	sleep 10
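A possible sanity check after the install, using only commands this script already relies on (the grep pattern is an assumption about the pod names):

# release should be listed as deployed in the test namespace
helm list --namespace "${NAMESPACE}" --filter chaos-mesh
kubectl get pods -n "${NAMESPACE}" | grep chaos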
	local chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh | tail -n1 | awk -F' ' '{print $2}' | sed 's/NAMESPACE//')

	if [ -n "${chaos_mesh_ns}" ]; then
		helm uninstall --wait --timeout 60s chaos-mesh --namespace ${chaos_mesh_ns} || :
	fi
	timeout 30 kubectl delete MutatingWebhookConfiguration $(kubectl get MutatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
	timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'chaos-mesh' | awk '{print $1}') || :
	timeout 30 kubectl delete ValidatingWebhookConfiguration $(kubectl get ValidatingWebhookConfiguration | grep 'validate-auth' | awk '{print $1}') || :
	for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
		kubectl get ${i} --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace |
			while read -r line; do
				local kind=$(echo "$line" | awk '{print $1}')
				local name=$(echo "$line" | awk '{print $2}')
				local namespace=$(echo "$line" | awk '{print $3}')
				kubectl patch $kind $name -n "${namespace}" --type=merge -p '{"metadata":{"finalizers":[]}}' || :
			done
		timeout 30 kubectl delete ${i} --all --all-namespaces || :
	done
	timeout 30 kubectl delete crd $(kubectl get crd | grep 'chaos-mesh.org' | awk '{print $1}') || :
	timeout 30 kubectl delete clusterrolebinding $(kubectl get clusterrolebinding | grep 'chaos-mesh' | awk '{print $1}') || :
	timeout 30 kubectl delete clusterrole $(kubectl get clusterrole | grep 'chaos-mesh' | awk '{print $1}') || :

[shfmt] reported by reviewdog 🐶
	for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
		kubectl get ${i} --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
			| while read -r line; do
				local kind=$(echo "$line" | awk '{print $1}')
				local name=$(echo "$line" | awk '{print $2}')
				local namespace=$(echo "$line" | awk '{print $3}')
				kubectl patch $kind $name -n "${namespace}" --type=merge -p '{"metadata":{"finalizers":[]}}' || :
			done
		timeout 30 kubectl delete ${i} --all --all-namespaces || :
	done
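After the teardown, a quick check that no chaos-mesh CRDs survive, mirroring the grep used above:

kubectl get crd | grep 'chaos-mesh.org' || echo "no chaos-mesh CRDs left"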
[shfmt] reported by reviewdog 🐶
	local ns=$1
	local selector=$2
	local pod_label=$3
	local label_value=$4
	local chaos_suffix=$5

	if [ "${selector}" == "pod" ]; then
		yq eval '
		.spec.selector.pods.'${ns}'[0] = "'${pod_label}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml |
		kubectl apply --namespace ${ns} -f -
	elif [ "${selector}" == "label" ]; then
		yq eval '

[shfmt] reported by reviewdog 🐶
		.spec.selector.pods.'${ns}'[0] = "'${pod_label}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml \
			| kubectl apply --namespace ${ns} -f -
	elif [ "${selector}" == "label" ]; then
		yq eval '
		.spec.selector.labelSelectors."'${pod_label}'" = "'${label_value}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml |
		kubectl apply --namespace ${ns} -f -
	fi
	sleep 5

[shfmt] reported by reviewdog 🐶
		.spec.selector.labelSelectors."'${pod_label}'" = "'${label_value}'"' ${TESTS_CONFIG_DIR}/chaos-pod-kill.yml \
			| kubectl apply --namespace ${ns} -f -
	fi
	sleep 5
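A sketch of how the two selector modes might be exercised; the function name kill_pods is an assumption, since only its body appears in this diff:

# hypothetical call sites (names and labels are assumptions):
kill_pods "${NAMESPACE}" pod "$(get_cluster_name)-mysql-0" "" "kill-primary"
kill_pods "${NAMESPACE}" label "app.kubernetes.io/component" "mysql" "kill-label"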
[shfmt] reported by reviewdog 🐶
	local ns=$1
	local pod=$2
	local chaos_suffix=$3

[shfmt] reported by reviewdog 🐶
	yq eval '
		.spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-pod-failure.yml |
		kubectl apply --namespace ${ns} -f -
	sleep 5

[shfmt] reported by reviewdog 🐶
		.spec.selector.pods.'${ns}'[0] = "'${pod}'"' ${TESTS_CONFIG_DIR}/chaos-pod-failure.yml \
			| kubectl apply --namespace ${ns} -f -
	sleep 5
[shfmt] reported by reviewdog 🐶
	local revision
	revision=$(kubectl get -n "${NAMESPACE}" certificate "$certificate" -o 'jsonpath={.status.revision}')

[shfmt] reported by reviewdog 🐶
	kubectl exec -n "${NAMESPACE}" "$pod_name" -- /tmp/cmctl renew "$certificate"
[shfmt] reported by reviewdog 🐶
	# wait for new revision
	for i in {1..10}; do
		local new_revision
		new_revision=$(kubectl get -n "${NAMESPACE}" certificate "$certificate" -o 'jsonpath={.status.revision}')
		if [ "$((revision + 1))" == "$new_revision" ]; then
			break
		fi
		sleep 1
	done
}

deploy_cmctl() {
	local service_account="cmctl"
	sed -e "s/percona-server-mysql-operator/$service_account/g" "${DEPLOY_DIR}/rbac.yaml" |
		yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' |
		kubectl apply -n "${NAMESPACE}" -f -
	kubectl apply -n "${NAMESPACE}" -f "${TESTS_CONFIG_DIR}/cmctl.yml"

[shfmt] reported by reviewdog 🐶
	sed -e "s/percona-server-mysql-operator/$service_account/g" "${DEPLOY_DIR}/rbac.yaml" \
		| yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \
		| kubectl apply -n "${NAMESPACE}" -f -
	kubectl apply -n "${NAMESPACE}" -f "${TESTS_CONFIG_DIR}/cmctl.yml"
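Putting the two helpers together, a renewal flow in a test might look like this; renew_certificate is an assumed wrapper name for the renew-and-wait snippet shown earlier, and the certificate name is illustrative:

deploy_cmctl
renew_certificate "$(get_cluster_name)-ssl" # hypothetical names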
e2e-tests/functions
Outdated
@@ -33,318 +33,317 @@ create_namespace() {
}

deploy_operator() {
	destroy_operator
	if [[ $OPERATOR_NS ]]; then
		create_namespace "${OPERATOR_NS}"
	fi

	kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"

	if [ -n "$OPERATOR_NS" ]; then
		kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f "${DEPLOY_DIR}/cw-rbac.yaml"
		yq eval \
			"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
			"${DEPLOY_DIR}/cw-operator.yaml" |
			yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' |
			yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' |
			kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
	else
		kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f "${DEPLOY_DIR}/rbac.yaml"
		yq eval \
			"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
			"${DEPLOY_DIR}/operator.yaml" |
			yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' |
			yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' |
			kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
	fi

[shfmt] reported by reviewdog 🐶
		yq eval \
			"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
			"${DEPLOY_DIR}/cw-operator.yaml" \
			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
			| kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
	else
		kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f "${DEPLOY_DIR}/rbac.yaml"
		yq eval \
			"$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
			"${DEPLOY_DIR}/operator.yaml" \
			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
			| yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
			| kubectl -n "${OPERATOR_NS:-$NAMESPACE}" apply -f -
	fi
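A hedged sanity check after deploying; the deployment name is an assumption based on the repo name, not confirmed by this diff:

kubectl -n "${OPERATOR_NS:-$NAMESPACE}" rollout status deployment/percona-server-mysql-operator --timeout=90s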
[shfmt] reported by reviewdog 🐶
	local max=$1
	local delay=$2
	shift 2 # cut delay and max args
	local n=1

	until "$@"; do
		if [[ $n -ge $max ]]; then
			echo "The command ${*} has failed after $n attempts."
			exit 1
		fi
		((n++))
		sleep $delay
	done
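Usage follows directly from the argument handling (max attempts, then delay, then the command), assuming the snippet is the body of a retry() helper:

# retries the command up to 10 times, sleeping 5s between attempts
retry 10 5 kubectl -n "${NAMESPACE}" get pods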
}

deploy_cmctl() {
	local service_account="cmctl"
	sed -e "s/percona-server-mysql-operator/$service_account/g" "${DEPLOY_DIR}/rbac.yaml" |
		yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' |
		kubectl apply -n "${NAMESPACE}" -f -
	kubectl apply -n "${NAMESPACE}" -f "${TESTS_CONFIG_DIR}/cmctl.yml"

[shfmt] reported by reviewdog 🐶
	sed -e "s/percona-server-mysql-operator/$service_account/g" "${DEPLOY_DIR}/rbac.yaml" \
		| yq '(select(.rules).rules[] | select(contains({"apiGroups": ["cert-manager.io"]}))).resources += "certificates/status"' \
		| kubectl apply -n "${NAMESPACE}" -f -
	kubectl apply -n "${NAMESPACE}" -f "${TESTS_CONFIG_DIR}/cmctl.yml"
commit: 6555938
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
CHANGE DESCRIPTION
Problem:
The async self-healing test is currently failing on the main branch and will need to be merged after the fixes for K8SPS-289 and K8SPS-288, which are most probably connected with #339.
CHECKLIST
Jira
Does the Jira ticket have the proper statuses for documentation (Needs Doc) and QA (Needs QA)?
Tests
Config/Logging/Testability