diff --git a/doc/simulation/inspector/README.md b/doc/simulation/inspector/README.md
index d8dc9107f..f49fa6091 100644
--- a/doc/simulation/inspector/README.md
+++ b/doc/simulation/inspector/README.md
@@ -23,14 +23,12 @@ The inspector is inspired by [acm-inspector](https://github.com/bjoydeep/acm-ins
       selector:
         name: multicluster-global-hub-postgres
       type: LoadBalancer
-    status:
-      loadBalancer: {}
     EOF
     ```

-3. The `python3` and the tool `pip3` have been installed on your environment.
+3. The `python` command and the `pip` tool are installed in your environment.

 4. Enable the `Prometheus` on your global hub.

-5. Running the `pip3 install -r ./doc/simulation/inspector/requirements.txt` to install dependencies.
+5. Run `pip install -r ./doc/simulation/inspector/requirements.txt` to install the dependencies.

 ## Running the inspector

@@ -59,10 +57,11 @@ The inspector is inspired by [acm-inspector](https://github.com/bjoydeep/acm-ins
   ```

 - Stop the backend process
-  
+
   ```bash
   ./doc/simulation/inspector/cmd/counter.sh stop
   ```
+
 ### Get CPU and Memory information

 ```bash
diff --git a/doc/simulation/inspector/cmd/check.sh b/doc/simulation/inspector/cmd/check.sh
index c7125ad82..175c72e0a 100755
--- a/doc/simulation/inspector/cmd/check.sh
+++ b/doc/simulation/inspector/cmd/check.sh
@@ -7,4 +7,4 @@ REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/.." ; pwd -P)"
 output=${REPO_DIR}/output
 mkdir -p ${output}

-python3 ${REPO_DIR}/src/entry.py "$1" "$2"
\ No newline at end of file
+python ${REPO_DIR}/src/entry.py "$1" "$2"
\ No newline at end of file
diff --git a/doc/simulation/inspector/cmd/check_agent.sh b/doc/simulation/inspector/cmd/check_agent.sh
index ece0461c0..b9db4fb3a 100755
--- a/doc/simulation/inspector/cmd/check_agent.sh
+++ b/doc/simulation/inspector/cmd/check_agent.sh
@@ -7,4 +7,4 @@ REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/.." ; pwd -P)"
 output=${REPO_DIR}/output
 mkdir -p ${output}

-python3 ${REPO_DIR}/src/agent.py "$1" "$2"
\ No newline at end of file
+python ${REPO_DIR}/src/agent.py "$1" "$2"
\ No newline at end of file
diff --git a/doc/simulation/inspector/cmd/counter.sh b/doc/simulation/inspector/cmd/counter.sh
index 252798e87..52b36256b 100755
--- a/doc/simulation/inspector/cmd/counter.sh
+++ b/doc/simulation/inspector/cmd/counter.sh
@@ -10,14 +10,14 @@ mkdir -p ${output}
 # Function to start the backend application
 start_backend() {
     echo "Starting the backend counter..."
-    python3 ${REPO_DIR}/src/counter.py override 2>&1 > ${output}/counter.log &
+    python ${REPO_DIR}/src/counter.py override 2>&1 > ${output}/counter.log &
 }

 # Function to start the backend application
 continue_backend() {
     echo "Continue the backend counter..."
     pkill -f ${REPO_DIR}/src/counter.py
-    python3 ${REPO_DIR}/src/counter.py 2>&1 >> ${output}/counter.log &
+    python ${REPO_DIR}/src/counter.py 2>&1 >> ${output}/counter.log &
 }

 # Function to stop the backend application
@@ -25,12 +25,12 @@ stop_backend() {
     echo "Stopping the backend counter..."
     # Replace the following line with the actual command or process name to stop your backend app
     pkill -f ${REPO_DIR}/src/counter.py
-    python3 ${REPO_DIR}/src/counter.py draw
+    python ${REPO_DIR}/src/counter.py draw
 }

 csv_draw() {
     echo "Drawing from the csv..."
-    python3 ${REPO_DIR}/src/counter.py draw
+    python ${REPO_DIR}/src/counter.py draw
 }

 # Check if an argument is provided (start or stop)
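Editor's note on the `counter.sh` hunks above: in bash, redirections are applied left to right, so `2>&1 > ${output}/counter.log` sends stderr to the terminal and only stdout to the log. If the intent is to also capture Python tracebacks from `counter.py` in the log, the conventional ordering would look like the sketch below (an aside for reviewers, not part of this patch):

```bash
# capture stdout and stderr in the log; the redirection order matters
python ${REPO_DIR}/src/counter.py override > ${output}/counter.log 2>&1 &

# appending variant, matching continue_backend
python ${REPO_DIR}/src/counter.py >> ${output}/counter.log 2>&1 &
```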
diff --git a/doc/simulation/inspector/src/counter.py b/doc/simulation/inspector/src/counter.py
index 0cdcd9651..e0cbfb804 100644
--- a/doc/simulation/inspector/src/counter.py
+++ b/doc/simulation/inspector/src/counter.py
@@ -61,10 +61,11 @@ def record_initial(override):
     while True:
         try:
             cur.execute(initial_sql)
-        except ValueError:
-            print("Invalid operation.", ValueError)
+        except Exception as e:
+            print("Error executing SQL query:", e)
             connection = get_conn()
             cur = connection.cursor()
+            continue
         df = pd.DataFrame([
             {'time': datetime.now(pytz.utc).strftime("%Y-%m-%d %H:%M:%S"), 'compliance': 0, 'event': 0, 'cluster': 0},
@@ -94,10 +95,11 @@ def record_compliance(override):
     while True:
         try:
             cur.execute(compliance_sql)
-        except ValueError:
-            print("Invalid operation.", ValueError)
+        except Exception as e:
+            print("Error executing SQL query:", e)
             connection = get_conn()
             cur = connection.cursor()
+            continue
         # cur.execute(compliance_sql)
         df = pd.DataFrame([
             {'time': datetime.now(pytz.utc).strftime("%Y-%m-%d %H:%M:%S"), 'compliant': 0, 'non_compliant': 0},
diff --git a/doc/simulation/local-policies/policy.sh b/doc/simulation/local-policies/policy.sh
index 3e05aec76..4c49e19f7 100755
--- a/doc/simulation/local-policies/policy.sh
+++ b/doc/simulation/local-policies/policy.sh
@@ -121,8 +121,9 @@ kubectl patch policy $root_policy_namespace.$root_plicy_name -n $cluster_name --
}

function generate_placement() {
-placement_namespace=$1
-placement_name=$2
+placement_namespace="$1"
+placement_name="$2"
+decsion="$3"
cat < "
+  exit 1
+fi
+
+# Parse the parameter using the delimiter ":"
IFS=':' read -r policy_start policy_end <<< "$1"
compliance_state=$2
export KUBECONFIG=$3
concurrent="${4:-1}"

sorted_clusters=$(kubectl get mcl | grep -oE 'managedcluster-[0-9]+' | awk -F"-" '{print $2}' | sort -n)
cluster_start=$(echo "$sorted_clusters" | head -n 1)
cluster_end=$(echo "$sorted_clusters" | tail -n 1)

echo ">> KUBECONFIG=$KUBECONFIG"
echo ">> Rotating Policy $policy_start~$policy_end to $compliance_state on cluster $cluster_start~$cluster_end"

random_number=$(shuf -i 10000-99999 -n 1)

function update_cluster_policies() {
  root_policy_namespace=default
  root_policy_name=$1
  root_policy_status=$2

  echo ">> Rotating $root_policy_name to $root_policy_status on cluster $cluster_start~$cluster_end"

  count=0
  # patch replicas policy: rootpolicy namespace, name and managed cluster
  for j in $(seq $cluster_start $cluster_end); do

    cluster_name=managedcluster-${j}
    event_name=$root_policy_namespace.$root_policy_name.$cluster_name.$random_number
    if [[ $root_policy_status == "Compliant" ]]; then
      # patch replicas policy status to compliant
      kubectl patch policy $root_policy_namespace.$root_policy_name -n $cluster_name --type=merge --subresource status --patch "status: {compliant: Compliant, details: [{compliant: Compliant, history: [{eventName: $event_name, message: Compliant; notification - limitranges container-mem-limit-range found as specified in namespace $root_policy_namespace}], templateMeta: {creationTimestamp: null, name: policy-limitrange-container-mem-limit-range}}]}" &
+    else
+      kubectl patch policy $root_policy_namespace.$root_policy_name -n $cluster_name --type=merge --subresource status --patch "status: {compliant: NonCompliant, details: [{compliant: NonCompliant, history: [{eventName: $event_name, message: NonCompliant; violation - limitranges container-mem-limit-range not found in namespace $root_policy_namespace}], templateMeta: {creationTimestamp: null, name: policy-limitrange-container-mem-limit-range}}]}" &
+    fi
+
+    if [ $j == 1 ];then
+      status="{clustername: managedcluster-${j}, clusternamespace: managedcluster-${j}, compliant: $root_policy_status}"
+    else
+      status="${status}, {clustername: managedcluster-${j}, clusternamespace: managedcluster-${j}, compliant: $root_policy_status}"
+    fi
+
+    count=$(( $count + 1 ))
+    if (( count == concurrent )); then
+      wait
+      count=0
+    fi
+  done
+
+  wait
+
+  # patch root policy status
+  kubectl patch policy $root_policy_name -n $root_policy_namespace --type=merge --subresource status --patch "status: {compliant: $root_policy_status, placement: [{placement: placement-$root_policy_name, placementBinding: binding-${root_policy_name}}], status: [${status}]}"
+}
+
+for i in $(seq $policy_start $policy_end)
+do
+  # patch replicas policy: rootpolicy namespace, name and managed cluster
+  update_cluster_policies "rootpolicy-${i}" $compliance_state
+done
\ No newline at end of file
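For reference, the rotate logic above (invoked later in this patch as `doc/simulation/local-policies/rotate-policy.sh`) takes a policy range, a target compliance state, a hub kubeconfig, and an optional concurrency level (`$4`, defaulting to `1`). A hedged invocation sketch; the kubeconfig path assumes the `doc/simulation/kubeconfig/<hub>` layout used by the setup scripts:

```bash
# rotate rootpolicy-1 ~ rootpolicy-50 to Compliant on one hub,
# patching up to 10 replicated policies in parallel
./doc/simulation/local-policies/rotate-policy.sh 1:50 "Compliant" ./doc/simulation/kubeconfig/hub1 10
```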
; pwd -P)" CURRENT_DIR=$(cd "$(dirname "$0")" || exit;pwd) -KUBECONFIG=$3 -FROM_POLICY_IDX=${FROM_POLICY_IDX:-1} +kubectl apply -f $REPO_DIR/pkg/testdata/crds/0000_00_policy.open-cluster-management.io_policies.crd.yaml +kubectl apply -f $REPO_DIR/pkg/testdata/crds/0000_00_cluster.open-cluster-management.io_placements.crd.yaml +kubectl apply -f $REPO_DIR/pkg/testdata/crds/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml source ${CURRENT_DIR}/policy.sh function generate_replicas_policy() { rootpolicy_name=$1 - cluster_num=$2 + cluster_start=$2 + cluster_end=$3 + + echo ">> Policy ${rootpolicy_name} is propagating to clusters $cluster_start~$cluster_end on $KUBECONFIG" # create root policy limit_range_policy $rootpolicy_name & # create replicas policy: rootpolicy namespace, name and managed cluster - for j in $(seq 1 $cluster_num); do - echo "Generating managedcluster-${j}/${rootpolicy_name} on $KUBECONFIG" + for j in $(seq $cluster_start $cluster_end); do + cluster_name=managedcluster-${j} + echo ">> Generating policy ${cluster_name}/${rootpolicy_name} on $KUBECONFIG" + + limit_range_replicas_policy default $rootpolicy_name ${cluster_name} - limit_range_replicas_policy default $rootpolicy_name managedcluster-${j} if [ $j == 1 ]; then - status="{clustername: managedcluster-${j}, clusternamespace: managedcluster-${j}, compliant: NonCompliant}" - decision="{clusterName: managedcluster-${j}, reason: ''}" + status="{clustername: $cluster_name, clusternamespace: $cluster_name, compliant: NonCompliant}" + decision="{clusterName: $cluster_name, reason: ''}" else - status="${status}, {clustername: managedcluster-${j}, clusternamespace: managedcluster-${j}, compliant: NonCompliant}" - decision="${decision}, {clusterName: managedcluster-${j}, reason: ''}" + status="${status}, {clustername: $cluster_name, clusternamespace: $cluster_name, compliant: NonCompliant}" + decision="${decision}, {clusterName: $cluster_name, reason: ''}" fi + done + wait + # patch root policy status - kubectl patch policy $rootpolicy_name -n default --type=merge --subresource status --patch "status: {compliant: NonCompliant, placement: [{placement: placement-roopolicy-${i}, placementBinding: binding-roopolicy-${i}}], status: [${status}]}" & + kubectl patch policy $rootpolicy_name -n default --type=merge --subresource status --patch "status: {compliant: NonCompliant, placement: [{placement: placement-$rootpolicy_name, placementBinding: binding-$rootpolicy_name}], status: [${status}]}" & # generate placement and placementdecision, each rootpolicy with a placement and placementdescision - generate_placement default placement-$rootpolicy_name & - # patch placementdecision status - kubectl patch placementdecision placement-${rootpolicy_name}-1 -n default --type=merge --subresource status --patch "status: {decisions: [${decision}]}" & - + generate_placement default placement-$rootpolicy_name "$decision" & + wait - echo "Rootpolicy ${rootpolicy_name} propagate to $cluster_num clusters on $KUBECONFIG" + echo ">> Policy ${rootpolicy_name} is propagated to clusters $cluster_start~$cluster_end on $KUBECONFIG" } -for i in $(seq $FROM_POLICY_IDX $1); do +sorted_clusters=$(kubectl get mcl | grep -oE 'managedcluster-[0-9]+' | awk -F"-" '{print $2}' | sort -n) +cluster_start=$(echo "$sorted_clusters" | head -n 1) +cluster_end=$(echo "$sorted_clusters" | tail -n 1) + + +sorted_policies=$(kubectl get policy -n default | grep 'NonCompliant' | grep -oE 'rootpolicy-[0-9]+' | awk -F"-" '{print $2}' | sort -n) 
+policy_last=$(echo "$sorted_policies" | tail -n 1)
+
+if [ -n "$policy_last" ] && [ "$policy_last" -gt 0 ]; then
+  policy_start=$((policy_last + 1))
+  echo ">> policy_start reset to $((policy_last + 1)) for KUBECONFIG=$KUBECONFIG"
+fi
+
+for i in $(seq ${policy_start} ${policy_end}); do
+  policy_name="rootpolicy-${i}"
   # create replicas policy: name and managed cluster
-  generate_replicas_policy rootpolicy-${i} $2
+  generate_replicas_policy $policy_name $cluster_start $cluster_end
 done
\ No newline at end of file
diff --git a/doc/simulation/managed-clusters/setup-cluster.sh b/doc/simulation/managed-clusters/setup-cluster.sh
index c63d0a76c..21f0fd399 100755
--- a/doc/simulation/managed-clusters/setup-cluster.sh
+++ b/doc/simulation/managed-clusters/setup-cluster.sh
@@ -7,12 +7,18 @@
 set -eo pipefail

+CURRENT_DIR=$(cd "$(dirname "$0")" || exit;pwd)
+REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." ; pwd -P)"
+
 if [ $# -lt 2 ]; then
   cluster_id_prefix="1" # Set a default value of "1" if $2 is not provided
 else
   cluster_id_prefix="$2" # Use the provided value of $2
 fi

+# create the mcl crd
+kubectl apply -f $REPO_DIR/pkg/testdata/crds/0000_00_cluster.open-cluster-management.io_managedclusters.crd.yaml
+
 # creating the simulated managedcluster
 for i in $(seq 1 $1)
 do
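A side note on `doc/simulation/managed-clusters/setup-cluster.sh` above: `$1` is the number of simulated clusters and `$2` is an optional cluster-id prefix (defaulting to `1`). A hedged invocation sketch, assuming `KUBECONFIG` already points at the target hub:

```bash
# create managedcluster-1 ... managedcluster-300 on the current hub,
# using "2" as the cluster-id prefix
export KUBECONFIG=./doc/simulation/kubeconfig/hub2   # assumed path layout from the setup scripts
./doc/simulation/managed-clusters/setup-cluster.sh 300 2
```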
diff --git a/doc/simulation/setup/README.md b/doc/simulation/setup/README.md
index b3940e1b6..51d5db4b4 100644
--- a/doc/simulation/setup/README.md
+++ b/doc/simulation/setup/README.md
@@ -5,28 +5,26 @@
 You can execute the following script to create the hub clusters and join them into the global hub. To join these clusters to it, You must set the `KUBECONFIG` environment variable to enable these hubs can connect to the global hub. Besides, you also need to provide several parameters:

 ```bash
-./doc/simulation/setup/setup-cluster.sh 2 2000
+./doc/simulation/setup/setup-cluster.sh 1:5 1:300
 ```

-- `$1` - How many managed hub clusters will be created
-- `$2` - How many managed cluster will be created on per managed hub
-- `$3` - Which managed cluster to start on per managed hub, default value is `1`
+- `$1` - `<hub_start:hub_end>` - Managed hubs, from `hub1` to `hub5`
+- `$2` - `<cluster_start:cluster_end>` - Managed clusters on each hub, from `managedcluster-1` to `managedcluster-300`

-That means create `5` managed hubs and each has `300` managed clusters. You can also run `./doc/simulation/managed-clusters/cleanup-cluster.sh 300` on each hub cluster to cleanup the generated managed clusters.
+That creates `5` managed hubs, each with `300` managed clusters.

 ## Create the policies on the managed hub clusters

 Running the following script to create the policies on all the managed hubs.

 ```bash
-./doc/simulation/setup/setup-policy.sh 5 50 300
+./doc/simulation/setup/setup-policy.sh 1:5 1:50
 ```

-- `$1` - How many managed hub clusters to mock the polices
-- `$2` - How many root policy will be created per managed hub cluster
-- `$3` - How many managed cluster the root policy will be propagated to on each hub cluster
+- `$1` - `<hub_start:hub_end>` - Managed hubs, from `hub1` to `hub5`
+- `$2` - `<policy_start:policy_end>` - Policies on each hub, from `rootpolicy-1` to `rootpolicy-50`

-That means the operation will run on the `5` managed hub concurrently. Each of them will create `50` root policies and propagate to the `300` managed clusters. So there will be `15000` replicas polices on the managed hub cluster. Likewise, you can execute `./doc/simulation/local-policies/cleanup-policy.sh 50 300` on each managed hub to delete the created polices.
+That means the operation runs on the `5` managed hubs concurrently. Each of them creates `50` root policies and propagates them to the `300` managed clusters, so there will be `15000` replicated policies on each managed hub cluster.

 ## The Scale for Global Hub Test

@@ -49,16 +47,16 @@
 kubectl label mcl hub4 vendor=OpenShift --overwrite
 kubectl label mcl hub5 vendor=OpenShift --overwrite
 ```

-## Rotate the Status of Polcies
+## Rotate the Status of Policies

 You can run the following script to update the replicas policies status on each hub cluster.

 ```bash
-# update the 50 root policy on the 300 cluster, and update the status to Compliant(default NonCompliant)
-$ ./doc/simulation/setup/rotate-policy.sh 50 300 "Compliant"
-# $ ./doc/simulation/setup/rotate-policy.sh 50 300 "NonCompliant"
+# update root policies 1 ~ 50 on all the clusters, setting the status to Compliant (default NonCompliant)
+$ ./doc/simulation/setup/rotate-policy.sh 1:5 1:50 "Compliant"
+# ./doc/simulation/setup/rotate-policy.sh 1:5 1:50 "NonCompliant"
 ```

-- `$1` - How many root policy status will route on per managed hub cluster
-- `$2` - How many managed clusters will this `$1` poclies will rotate
-- `$3` - The target compliance status
-- `$4` - Optional: Specify how many processes can be executed concurrently
\ No newline at end of file
+
+- `$1` - `<hub_start:hub_end>` - Managed hubs, from `hub1` to `hub5`
+- `$2` - `<policy_start:policy_end>` - Policies on each hub, from `rootpolicy-1` to `rootpolicy-50`
+- `$3` - The target compliance status
\ No newline at end of file
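One way to sanity-check the scale described in the README above is to count the generated objects on a single hub. A rough sketch; it assumes the kubeconfig files under `doc/simulation/kubeconfig/` created by `setup-cluster.sh`, and the expected totals depend on the ranges you chose:

```bash
export KUBECONFIG=./doc/simulation/kubeconfig/hub1

# simulated managed clusters on this hub (about 300 for the 1:300 range)
kubectl get mcl --no-headers | wc -l

# root policies in the default namespace (about 50 for the 1:50 range)
kubectl get policy -n default --no-headers | wc -l

# root plus replicated policies across all namespaces (about 50 + 50*300)
kubectl get policy -A --no-headers | wc -l
```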
diff --git a/doc/simulation/setup/rotate-policy.sh b/doc/simulation/setup/rotate-policy.sh
index 09804857e..a0ba74a0f 100755
--- a/doc/simulation/setup/rotate-policy.sh
+++ b/doc/simulation/setup/rotate-policy.sh
@@ -4,51 +4,38 @@
 set -eo pipefail

-### This script is used to setup policy and placement for testing
-### Usage: ./rotate-policy.sh Compliant/NonCompliant
-
-CURRENT_DIR=$(cd "$(dirname "$0")" || exit;pwd)
-
-concurrent="${4:-1}"
-
-function update_cluster_policies() {
-  root_policy_namespace=default
-  root_policy_name=$1
-  cluster_num=$2
-  count=0
-
-  # path replicas policy: rootpolicy namespace, name and managed cluster
-  for j in $(seq 1 ${cluster_num}); do
-    cluster_name=managedcluster-${j}
-
-    if [[ $3 == "Compliant" ]]; then
-      # patch replicas policy status to compliant
-      kubectl patch policy $root_policy_namespace.$root_policy_name -n $cluster_name --type=merge --subresource status --patch "status: {compliant: Compliant, details: [{compliant: Compliant, history: [{eventName: $root_policy_namespace.$root_policy_name.$cluster_name, message: Compliant; notification - limitranges container-mem-limit-range found as specified in namespace $root_policy_namespace}], templateMeta: {creationTimestamp: null, name: policy-limitrange-container-mem-limit-range}}]}" &
-    else
-      kubectl patch policy $root_policy_namespace.$root_policy_name -n $cluster_name --type=merge --subresource status --patch "status: {compliant: NonCompliant, details: [{compliant: NonCompliant, history: [{eventName: $root_policy_namespace.$root_policy_name.$cluster_name, message: NonCompliant; violation - limitranges container-mem-limit-range not found in namespace $root_policy_namespace}], templateMeta: {creationTimestamp: null, name: policy-limitrange-container-mem-limit-range}}]}" &
-    fi
-
-    if [ $j == 1 ];then
-      status="{clustername: managedcluster-${j}, clusternamespace: managedcluster-${j}, compliant: $3}"
-    else
-      status="${status}, {clustername: managedcluster-${j}, clusternamespace: managedcluster-${j}, compliant: $3}"
-    fi
-
-    count=$(( $count + 1 ))
-    if (( count == concurrent )); then
-      wait
-      count=0
-    fi
-  done
-
-  wait
-
-  # patch root policy status
-  kubectl patch policy $root_policy_name -n $root_policy_namespace --type=merge --subresource status --patch "status: {compliant: $3, placement: [{placement: placement-$root_policy_name, placementBinding: binding-${root_policy_name}}], status: [${status}]}"
-}
-
-for i in $(seq 1 $1)
-do
-  # path replicas policy: rootpolicy namespace, name and managed cluster
-  update_cluster_policies "rootpolicy-${i}" $2 $3
+# Check if the script is provided with the correct number of positional parameters
+if [ $# -ne 3 ]; then
+  echo "Usage: $0 <hub_start:hub_end> <policy_start:policy_end> <compliance_state>"
+  exit 1
+fi
+
+# Parse the parameter using the delimiter ":"
+IFS=':' read -r hub_start hub_end <<< "$1"
+IFS=':' read -r policy_start policy_end <<< "$2"
+compliance_state=$3
+
+echo ">> Rotate policy ${policy_start}~${policy_end} to $compliance_state on hub ${hub_start}~${hub_end}"
+
+REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." ; pwd -P)"
+
+export KUBECONFIG=${KUBECONFIG}
+
+source ${REPO_DIR}/doc/simulation/local-policies/policy.sh
+cluster_dir=${REPO_DIR}/doc/simulation/kubeconfig
+
+for i in $(seq $hub_start $hub_end); do
+  hub_cluster=hub$i
+  kubeconfig="${cluster_dir}/${hub_cluster}"
+  bash ${REPO_DIR}/doc/simulation/local-policies/rotate-policy.sh $2 $compliance_state $kubeconfig &
+done
+
+wait
+
+# printing the clusters
+echo "Access the clusters:"
+for i in $(seq $hub_start $hub_end); do
+  cluster=hub${i}
+  kubeconfig="${cluster_dir}/${cluster}"
+  echo "export KUBECONFIG=${kubeconfig}"
 done
\ No newline at end of file
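The reworked `doc/simulation/setup/rotate-policy.sh` above simply fans out to `local-policies/rotate-policy.sh` once per hub kubeconfig under `doc/simulation/kubeconfig/`. A hedged usage sketch showing that sub-ranges are now possible:

```bash
# flip only rootpolicy-1 ~ rootpolicy-10 to Compliant, and only on hub2 and hub3
./doc/simulation/setup/rotate-policy.sh 2:3 1:10 "Compliant"
```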
diff --git a/doc/simulation/setup/setup-cluster.sh b/doc/simulation/setup/setup-cluster.sh
index 6c8638512..4bbfcc007 100755
--- a/doc/simulation/setup/setup-cluster.sh
+++ b/doc/simulation/setup/setup-cluster.sh
@@ -6,21 +6,32 @@
 ### Usage: ./setup-cluster.sh
 set -eo pipefail

+# Check if the number of positional parameters is not equal to 1 or not equal to 2
+if [ $# -ne 1 ] && [ $# -ne 2 ]; then
+  echo "Usage: $0 <hub_start:hub_end> [<cluster_start:cluster_end>]"
+  exit 1
+fi
+
+# Parse the parameter using the delimiter ":"
+IFS=':' read -r hub_start hub_end <<< "$1"
+IFS=':' read -r cluster_start cluster_end <<< "$2"
+
+echo ">> Generate cluster ${cluster_start}~${cluster_end} on hub ${hub_start}~${hub_end}"
+
 REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." ; pwd -P)"
 export KUBECONFIG=${KUBECONFIG}
-
 cluster_dir="${REPO_DIR}/doc/simulation/kubeconfig"
 mkdir -p ${cluster_dir}

 # creating the simulated hub clusters
-for i in $(seq 1 $1); do
+for i in $(seq $hub_start $hub_end); do
   cluster=hub${i}
   if ! kind get clusters | grep -q "$cluster"; then
-    echo "Creating: $cluster..."
+    echo ">> Creating KinD cluster: $cluster..."
     kubeconfig="${cluster_dir}/${cluster}"
     kind create cluster --kubeconfig $kubeconfig --name ${cluster} &
   else
-    echo "Kind cluster '$cluster' already exists. Skipping creation."
+    echo ">> KinD cluster '$cluster' already exists. Skipping..."
   fi
 done

@@ -28,7 +39,7 @@
 wait

 # join the kind cluster to global hub
 # 1. create cluster on global hub
-for i in $(seq 1 $1); do
+for i in $(seq $hub_start $hub_end); do
   cluster="hub${i}"

   # on global hub: create cluster namespace, cluster, and KlusterletAddonConfig
@@ -86,6 +97,16 @@ EOF
   kubectl apply --kubeconfig $kubeconfig -f $imports
 done

+if [ $# -eq 1 ]; then
+  echo "Access the clusters:"
+  for i in $(seq $hub_start $hub_end); do
+    hub_cluster=hub${i}
+    kubeconfig="${cluster_dir}/${hub_cluster}"
+    echo "export KUBECONFIG=${kubeconfig}"
+  done
+  exit 0
+fi
+
 function create_managed_cluster() {
   cluster_id=$1
   cluster_name=$2
@@ -107,22 +128,21 @@ EOF
 }

-start_index=${3:=1}
-for j in $(seq $start_index $2); do # for each managed cluster on the hub
-  for i in $(seq 1 $1); do # for each hub cluster
+for j in $(seq $cluster_start $cluster_end); do # for each managed cluster on the hub
+  for i in $(seq $hub_start $hub_end); do # for each hub cluster
     hub_cluster=hub${i}
-    name=managedcluster-${j}
+    cluster_name=managedcluster-${j}
     id=$(printf "%08d-0000-0000-0000-%012d" "${i}" "${j}") # "00000000-0000-0000-0000-000000000000"
     kubeconfig="${cluster_dir}/${hub_cluster}"
-    echo "Creating ${cluster}: $name..."
-    create_managed_cluster ${id} ${name} ${kubeconfig} &
+    echo ">> Creating ${hub_cluster}: ${cluster_name}..."
+    create_managed_cluster ${id} ${cluster_name} ${kubeconfig} &
   done
   wait # waitting create the cluster on each hub
 done

 # printing the clusters
 echo "Access the clusters:"
-for i in $(seq 1 $1); do
+for i in $(seq $hub_start $hub_end); do
   hub_cluster=hub${i}
   kubeconfig="${cluster_dir}/${hub_cluster}"
   echo "export KUBECONFIG=${kubeconfig}"
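One detail worth calling out from the loop above: each simulated managed cluster gets a deterministic UUID built from the hub index and the cluster index via `printf`. A quick illustration with hypothetical indices, just to show the resulting format:

```bash
# hub 2, managed cluster 3 -> a stable, unique cluster id
printf "%08d-0000-0000-0000-%012d\n" 2 3
# prints: 00000002-0000-0000-0000-000000000003
```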
; pwd -P)" +export KUBECONFIG=${KUBECONFIG} source ${REPO_DIR}/doc/simulation/local-policies/policy.sh - cluster_dir=${REPO_DIR}/doc/simulation/kubeconfig mkdir -p ${cluster_dir} -for i in $(seq 1 $1); do +for i in $(seq $hub_start $hub_end); do hub_cluster=hub$i - root_policy_num=$2 - cluster_num=$3 kubeconfig="${cluster_dir}/${hub_cluster}" - - bash ${REPO_DIR}/doc/simulation/local-policies/setup-policy.sh $root_policy_num $cluster_num $kubeconfig & + bash ${REPO_DIR}/doc/simulation/local-policies/setup-policy.sh $2 $kubeconfig & done wait # printing the clusters echo "Access the clusters:" -for i in $(seq 1 $1); do +for i in $(seq $hub_start $hub_end); do cluster=hub${i} kubeconfig="${cluster_dir}/${cluster}" echo "export KUBECONFIG=${kubeconfig}" diff --git a/doc/simulation/setup/stopwatch-compliance.sh b/doc/simulation/setup/stopwatch-compliance.sh deleted file mode 100755 index 2ec45be23..000000000 --- a/doc/simulation/setup/stopwatch-compliance.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-$1} -echo "KUBECONFIG=$KUBECONFIG" - -pgha="$(kubectl get pods -n multicluster-global-hub -l postgres-operator.crunchydata.com/role=master |grep postgres-pgha |awk '{print $1}' || true)" -if [ "$pgha" != "" ]; then - echo "database pod $pgha" -fi - -sql="SELECT TO_CHAR(NOW(), 'YYYY-MM-DD HH24:MI:SS') as time, compliance as status, COUNT(1) as count -FROM local_status.compliance -GROUP BY compliance;" - -while [ true ]; do - sleep 60 - # kubectl exec -it $pgha -c database -n multicluster-global-hub -- psql -U postgres -d hoh -c "$sql" - kubectl exec -t $pgha -c database -n multicluster-global-hub -- psql -U postgres -d hoh -t -c "$sql" -done diff --git a/doc/simulation/setup/stopwatch-initialization.sh b/doc/simulation/setup/stopwatch-initialization.sh deleted file mode 100755 index 3bf879060..000000000 --- a/doc/simulation/setup/stopwatch-initialization.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -export KUBECONFIG=${KUBECONFIG:-$1} -echo "KUBECONFIG=$KUBECONFIG" - -pgha="$(kubectl get pods -n multicluster-global-hub -l postgres-operator.crunchydata.com/role=master |grep postgres-pgha |awk '{print $1}' || true)" -if [ "$pgha" != "" ]; then - echo "database pod: $pgha" -fi - -sql="SELECT TO_CHAR(NOW(), 'YYYY-MM-DD HH24:MI:SS') as time, table_name, COUNT(1) AS count -FROM ( - SELECT 'cluster' AS table_name FROM status.managed_clusters - UNION ALL - SELECT 'compliance' AS table_name FROM local_status.compliance - UNION ALL - SELECT 'event' AS table_name FROM "event".local_policies -) AS subquery -GROUP BY table_name" - -# sql="SELECT TO_CHAR(NOW(), 'YYYY-MM-DD HH24:MI:SS'), COUNT(1) FROM status.managed_clusters" - -while [ true ]; do - sleep 0.4 - # kubectl exec -it $pgha -c database -n multicluster-global-hub-postgres -- psql -U postgres -d hoh -c "$sql" - kubectl exec -t $pgha -c database -n multicluster-global-hub -- psql -U postgres -d hoh -t -c "$sql" -done \ No newline at end of file