Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update: scripts #258

Merged
merged 3 commits into from
Nov 13, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 36 additions & 12 deletions ack-system/scripts/setup.sh
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -4,31 +4,55 @@
# kludges
# TODO: ArgoCD Hooks

setup_namespace(){
  # Switch to the given namespace, creating the project first if needed.
  # $1 - namespace/project name
  NAMESPACE=${1}

  # new-project fails if it already exists; fall back to selecting it
  oc new-project "${NAMESPACE}" 2>/dev/null || \
    oc project "${NAMESPACE}"
}

ocp_aws_cluster(){
  # Detect an AWS-backed OpenShift cluster: the installer leaves an
  # aws-creds secret in kube-system. Status 0 = AWS, 1 = not AWS.
  if oc -n kube-system get secret/aws-creds -o name > /dev/null 2>&1; then
    return 0
  fi
  return 1
}

ocp_aws_get_key(){
  # Export AWS credentials extracted from the cluster's aws-creds secret.
  # Returns 1 (without touching the environment) on non-AWS clusters.
  # get aws creds
  ocp_aws_cluster || return 1

  # assign and export separately so a failed `oc extract` is not masked
  AWS_ACCESS_KEY_ID=$(oc -n kube-system extract secret/aws-creds --keys=aws_access_key_id --to=-)
  AWS_SECRET_ACCESS_KEY=$(oc -n kube-system extract secret/aws-creds --keys=aws_secret_access_key --to=-)
  AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION:-us-east-2}

  export AWS_ACCESS_KEY_ID
  export AWS_SECRET_ACCESS_KEY
  export AWS_DEFAULT_REGION

  echo "AWS_DEFAULT_REGION: ${AWS_DEFAULT_REGION}"
  # pause so the operator can read the region before more output scrolls by
  sleep 4
}

# create secrets for ack controllers
aws_setup_ack_system(){
  # Install the ACK controllers and seed each one's user-secrets from the
  # AWS_* env vars (set by ocp_aws_get_key). Existing secrets are kept.
  NAMESPACE=ack-system

  # manually create ack-system
  setup_namespace "${NAMESPACE}"

  oc apply -k ../"${NAMESPACE}"/aggregate/popular

  for type in ec2 ecr iam lambda route53 s3 sagemaker
  do
    oc apply -k ../ack-${type}-controller/operator/overlays/alpha

    # do not overwrite credentials that were already provisioned
    if oc -n "${NAMESPACE}" get secret "${type}-user-secrets" -o name; then
      echo "Found: ${type}-user-secrets - not replacing"
      continue
    fi

    # template the secret manifest with the live credentials and apply it
    < ../ack-${type}-controller/operator/overlays/alpha/user-secrets-secret.yaml \
      sed "s@UPDATE_AWS_ACCESS_KEY_ID@${AWS_ACCESS_KEY_ID}@; s@UPDATE_AWS_SECRET_ACCESS_KEY@${AWS_SECRET_ACCESS_KEY}@" | \
      oc -n "${NAMESPACE}" apply -f -
  done
}

# main: pull AWS creds from the cluster, then set up the ACK controllers
ocp_aws_get_key
aws_setup_ack_system
12 changes: 6 additions & 6 deletions advanced-cluster-management/remove-acm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ KUBECTL=oc
# Force delete klusterlet
echo "attempt to delete klusterlet"
${KUBECTL} delete klusterlet klusterlet --timeout=60s
${KUBECTL} delete namespace ${KLUSTERLET_NAMESPACE} --wait=false
${KUBECTL} delete namespace "${KLUSTERLET_NAMESPACE}" --wait=false
echo "force removing klusterlet"
${KUBECTL} patch klusterlet klusterlet --type="json" -p '[{"op": "remove", "path":"/metadata/finalizers"}]'
echo "removing klusterlet crd"
Expand Down Expand Up @@ -52,14 +52,14 @@ component_crds=(

# For each ACM CRD: delete its resources (stripping finalizers so they
# cannot hang), then delete the CRD itself, and finally the namespace.
for crd in "${component_crds[@]}"; do
  echo "force delete all CustomResourceDefinition ${crd} resources..."
  for resource in $(${KUBECTL} get "${crd}" -o name -n "${OPERATOR_NAMESPACE}"); do
    echo "attempt to delete ${crd} resource ${resource}..."
    ${KUBECTL} delete "${resource}" -n "${OPERATOR_NAMESPACE}" --timeout=30s
    echo "force remove ${crd} resource ${resource}..."
    ${KUBECTL} patch "${resource}" -n "${OPERATOR_NAMESPACE}" --type="json" -p '[{"op": "remove", "path":"/metadata/finalizers"}]'
  done
  echo "force delete CustomResourceDefinition ${crd}..."
  ${KUBECTL} delete crd "${crd}"
done

${KUBECTL} delete namespace "${OPERATOR_NAMESPACE}"
1 change: 1 addition & 0 deletions advanced-cluster-security
83 changes: 44 additions & 39 deletions scripts/automate_operators.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,21 +18,21 @@ There is probably a better way to do this, but it should help create
the basic file structure needed for an operator.

functions:
get_all_pkg_manifests
get_all_pkg_manifests_details
save_all_pkg_manifests_details
pkg_manifests_get_all
pkg_manifests_get_all_details
pkg_manifests_save_all_details

# ex: rhods-operator
get_pkg_manifest_info rhods-operator
get_pkg_manifest_channels rhods-operator
get_pkg_manifest_description rhods-operator
pkg_manifest_get_info rhods-operator
pkg_manifest_get_channels rhods-operator
pkg_manifest_get_description rhods-operator

create_operator
create_all_operators
"
}

is_sourced() {
is_sourced(){
if [ -n "$ZSH_VERSION" ]; then
case $ZSH_EVAL_CONTEXT in *:file:*) return 0;; esac
else # Add additional POSIX-compatible shell names here, if needed.
Expand All @@ -41,17 +41,21 @@ is_sourced() {
return 1 # NOT sourced.
}

ocp_check_login(){
  # Verify an active `oc` login; returns 1 when not logged in.
  oc whoami || return 1
  # show only the API endpoint line for a quick sanity check
  oc cluster-info | head -n1
  echo
}

ocp_check_info(){
  # Print login + current namespace, then pause so the user can confirm
  # they are on the right cluster (override delay with SLEEP_SECONDS).
  ocp_check_login || return 1

  echo "NAMESPACE: $(oc project -q)"
  sleep "${SLEEP_SECONDS:-8}"
}

# main script functions

# pkg.sh
# setup manifest info
MANIFEST_INFO="NAME:.status.packageName"
MANIFEST_INFO="${MANIFEST_INFO},NAMESPACE:.status.channels[0].currentCSVDesc.annotations.operatorframework\.io/suggested-namespace"
MANIFEST_INFO="${MANIFEST_INFO},CATALOG_SOURCE:.status.catalogSource"
Expand All @@ -69,33 +73,28 @@ BASIC_INFO="${BASIC_INFO},DISPLAY_NAME:.status.channels[0].currentCSVDesc.displa
BASIC_INFO="${BASIC_INFO},DEFAULT_CHANNEL:.status.defaultChannel"
BASIC_INFO="${BASIC_INFO},CATALOG_SOURCE:.status.catalogSource"

pkg_manifests_get_all(){
  # List every packagemanifest (BASIC_INFO columns), grouped by catalog source.
  oc get packagemanifest \
    -o custom-columns="${BASIC_INFO}" \
    --sort-by='.status.catalogSource'
}

pkg_manifests_get_all_by_group(){
  # Filter the package list by catalog group.
  # $1 - group name (default: "Red Hat Operators")
  PKG_GROUP=${1:-Red Hat Operators}

  pkg_manifests_get_all | grep "${PKG_GROUP}"
}

pkg_manifests_get_all_names_only(){
  # Emit only the package names (first column), dropping the header row.
  pkg_manifests_get_all | grep -v NAME | awk '{print $1}'
}

pkg_manifests_get_all_details(){
  # Dump detailed manifest info (MANIFEST_INFO columns), sorted by name.
  oc get packagemanifest \
    --sort-by='.status.packageName' \
    -o custom-columns="${MANIFEST_INFO}"
}

save_all_pkg_manifests_details(){
echo -e "# created: $(date -u)\n# script: dump_operator_info" > operator_info.txt
get_all_pkg_manifests_details >> operator_info.txt
}

get_pkg_manifest_info(){
pkg_manifest_get_info(){
[ "${1}x" == "x" ] && return
NAME="${1}"

Expand All @@ -104,7 +103,7 @@ get_pkg_manifest_info(){
-o=custom-columns="${MANIFEST_INFO}"
}

get_pkg_manifest_channels(){
pkg_manifest_get_channels(){
[ "${1}x" == "x" ] && return
NAME="${1}"

Expand All @@ -114,7 +113,7 @@ get_pkg_manifest_channels(){
-o=jsonpath='{range .status.channels[*]}{.name}{"\n"}{end}' | sort
}

get_pkg_manifest_description(){
pkg_manifest_get_description(){
[ "${1}x" == "x" ] && return
NAME="${1}"

Expand All @@ -124,6 +123,11 @@ get_pkg_manifest_description(){
-o=jsonpath="{.status.channels[0].currentCSVDesc.description}"
}

pkg_manifests_save_all_details(){
  # Snapshot detailed package-manifest info into operator_info.txt,
  # prefixed with a timestamped provenance header.
  {
    echo -e "# created: $(date -u)\n# script: dump_operator_info"
    pkg_manifests_get_all_details
  } > operator_info.txt
}

create_operator_base(){
[ "${1}x" == "x" ] && return
NAME="${1}"
Expand All @@ -135,22 +139,22 @@ create_operator_base(){

echo "create_operator_base:" "${@}"

if [ "${NS_OWN}" == "false" ] && [ "${NAMESPACE}" == "openshift-operators" ]; then
if [ "${NS_OWN}" == "false" ] && [ "${NAMESPACE}" == "<none>" ]; then
BASE_DIR="${NAME}"
create_operator_base_files_wo_ns
elif [ ! "${NAMESPACE}" == "<none>" ] && [ ! "${NS_OWN}" == "<none>" ]; then
elif [ "${NS_OWN}" == "true" ] && [ "${NAMESPACE}" == "<none>" ]; then
BASE_DIR="${NAME}"
NAMESPACE="${NAME}"
create_operator_base_files_w_ns
elif [ "${NAMESPACE}" == "<none>" ] && [ "${NS_OWN}" == "true" ]; then
elif [ ! "${NS_OWN}" == "<none>" ] && [ ! "${NAMESPACE}" == "<none>" ]; then
BASE_DIR="${NAME}"
NAMESPACE="${NAME}"
create_operator_base_files_w_ns
else
BASE_DIR="${NAME}"
create_operator_base_files_wo_ns
fi

get_pkg_manifest_description "${NAME}" > "${BASE_DIR}/INFO.md"
pkg_manifest_get_description "${NAME}" > "${BASE_DIR}/INFO.md"

}

Expand All @@ -164,6 +168,7 @@ create_operator_dir(){
}

create_operator_base_files_wo_ns(){
[ "${NAMESPACE}" == "<none>" ] && NAMESPACE=openshift-operators
echo "create operator w/o ns"

create_operator_dir "${BASE_DIR}"
Expand Down Expand Up @@ -253,7 +258,7 @@ metadata:
namespace: ${NAMESPACE}
YAML

if [ "${NS_OWN}" == "true" ]; then
if [ "${NS_SINGLE}" == "true" ] && [ "${NS_ALL}" != "true" ] || [ "${NS_MULTI}" != "false" ]; then
echo -n "spec:
targetNamespaces:
- ${NAMESPACE}
Expand Down Expand Up @@ -296,7 +301,7 @@ create_operator_overlays(){
NAME="${1}"
BASE_PATH="${BASE_DIR}/operator/overlays"

for channel in $(get_pkg_manifest_channels "${NAME}" | grep -v NAME)
for channel in $(pkg_manifest_get_channels "${NAME}" | grep -v NAME)
do
echo "overlay: ${channel}"
create_operator_overlay_files "${channel}"
Expand All @@ -317,7 +322,7 @@ Do not use the \`base\` directory directly, as you will need to patch the \`chan

The current *overlays* available are for the following channels:

$(for channel in $(get_pkg_manifest_channels "${NAME}" | grep -v NAME)
$(for channel in $(pkg_manifest_get_channels "${NAME}" | grep -v NAME)
do
echo "* [${channel}](operator/overlays/${channel})"
done
Expand Down Expand Up @@ -353,7 +358,7 @@ create_operator(){
[ "${1}x" == "x" ] && return
NAME="${1}"

read -r NAME NAMESPACE CATALOG_SOURCE SOURCE_NAMESPACE DEFAULT_CHANNEL CHANNELS NS_OWN NS_SINGLE NS_MULTI NS_ALL DISPLAY_NAME <<<"$(get_pkg_manifest_info "${NAME}" | grep -v NAME)"
read -r NAME NAMESPACE CATALOG_SOURCE SOURCE_NAMESPACE DEFAULT_CHANNEL CHANNELS NS_OWN NS_SINGLE NS_MULTI NS_ALL DISPLAY_NAME <<<"$(pkg_manifest_get_info "${NAME}" | grep -v NAME)"

if [ -z "$DEBUG" ]; then
echo "NAME: ${NAME}"
Expand All @@ -377,11 +382,11 @@ create_operator(){

create_all_operators(){
  # Scaffold a curated set of operators; swap in the commented loop to
  # generate scaffolding for every package in the catalog instead.
  for package in nfd opendatahub-operator serverless-operator openshift-gitops-operator rhods-operator
  # for package in $(pkg_manifests_get_all_names_only)
  do
    create_operator "${package}"
  done
}

# sourced: show usage; executed: list all package manifests
# shellcheck disable=SC2015
is_sourced && usage || pkg_manifests_get_all
36 changes: 27 additions & 9 deletions scripts/lint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,31 +2,49 @@
# shellcheck disable=SC2015,SC1091
set -e

usage(){
  # Print how to invoke this linter.
  echo "
usage: scripts/lint.sh
"
}

py_setup_venv(){
  # Create and activate a fresh python venv, then quietly upgrade pip.
  python3 -m venv venv
  source venv/bin/activate
  pip install -q -U pip

  # re-check activation; print usage if the venv is still unusable
  py_check_venv || usage
}

py_check_venv(){
  # Activate the python venv, creating it when missing or broken.
  # Explicit if/else replaces the ambiguous `A && B || C` chain
  # (same semantics: activation failure also falls back to setup).
  if [ -d venv ]; then
    . venv/bin/activate || py_setup_venv
  else
    py_setup_venv
  fi
  # NOTE(review): under `set -e` this returns non-zero when
  # requirements.txt is absent — same as the original; confirm intended.
  [ -e requirements.txt ] && pip install -q -r requirements.txt
}

py_bin_checks(){
  # Soft-skip: exit 0 when python tooling is absent so CI does not fail.
  # `command -v` is the portable replacement for `which`.
  command -v python || exit 0
  command -v pip || exit 0
}

# activate python venv, then confirm the tooling exists
py_check_venv
py_bin_checks

# check scripts
# BUGFIX: -print0 output must be consumed with `xargs -0`,
# otherwise paths are split on whitespace/newlines
command -v shellcheck && \
  find . -name '*.sh' -print0 | xargs -0 shellcheck

# check spelling (only when aspell and a config are present)
command -v aspell && \
  [ -e .pyspelling.yml ] && \
  pyspelling -c .pyspelling.yml

# check Dockerfiles
command -v hadolint && \
  find . -not -path "./scratch/*" \( -name Dockerfile -o -name Containerfile \) -exec hadolint {} \;

# check yaml
yamllint . && echo "YAML check passed :)"

# validate manifests
[ -e scripts/validate_manifests.sh ] && scripts/validate_manifests.sh
1 change: 1 addition & 0 deletions scripts/validate_manifests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ process_kustomization(){

# echo "$KUSTOMIZE_BUILD_OUTPUT" | kubeval ${IGNORE_MISSING_SCHEMAS} --schema-location="file://${SCHEMA_LOCATION}" --force-color
KUSTOMIZE_BUILD_OUTPUT=$(${KUSTOMIZE_CMD} "${BUILD}")

build_response=$?

if [ $build_response -ne 0 ]; then
Expand Down
Loading